summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig32
-rw-r--r--drivers/acpi/acpi_lpss.c139
-rw-r--r--drivers/acpi/acpi_video.c14
-rw-r--r--drivers/acpi/acpica/acapps.h3
-rw-r--r--drivers/acpi/acpica/acdebug.h4
-rw-r--r--drivers/acpi/acpica/acglobal.h82
-rw-r--r--drivers/acpi/acpica/aclocal.h15
-rw-r--r--drivers/acpi/acpica/acmacros.h2
-rw-r--r--drivers/acpi/acpica/acnamesp.h3
-rw-r--r--drivers/acpi/acpica/acutils.h23
-rw-r--r--drivers/acpi/acpica/dbexec.c110
-rw-r--r--drivers/acpi/acpica/dbfileio.c4
-rw-r--r--drivers/acpi/acpica/dbinput.c145
-rw-r--r--drivers/acpi/acpica/dscontrol.c18
-rw-r--r--drivers/acpi/acpica/dsfield.c28
-rw-r--r--drivers/acpi/acpica/dsobject.c4
-rw-r--r--drivers/acpi/acpica/dspkginit.c21
-rw-r--r--drivers/acpi/acpica/dsutils.c3
-rw-r--r--drivers/acpi/acpica/dswload.c6
-rw-r--r--drivers/acpi/acpica/dswload2.c13
-rw-r--r--drivers/acpi/acpica/evregion.c10
-rw-r--r--drivers/acpi/acpica/exdump.c11
-rw-r--r--drivers/acpi/acpica/hwtimer.c29
-rw-r--r--drivers/acpi/acpica/hwvalid.c14
-rw-r--r--drivers/acpi/acpica/nsaccess.c13
-rw-r--r--drivers/acpi/acpica/nsconvert.c3
-rw-r--r--drivers/acpi/acpica/nsnames.c149
-rw-r--r--drivers/acpi/acpica/nssearch.c1
-rw-r--r--drivers/acpi/acpica/nsxfeval.c9
-rw-r--r--drivers/acpi/acpica/psargs.c2
-rw-r--r--drivers/acpi/acpica/psobject.c10
-rw-r--r--drivers/acpi/acpica/psutils.c14
-rw-r--r--drivers/acpi/acpica/utdebug.c18
-rw-r--r--drivers/acpi/acpica/utdecode.c11
-rw-r--r--drivers/acpi/acpica/uterror.c73
-rw-r--r--drivers/acpi/acpica/utinit.c1
-rw-r--r--drivers/acpi/acpica/utmath.c4
-rw-r--r--drivers/acpi/acpica/utmutex.c9
-rw-r--r--drivers/acpi/acpica/utnonansi.c11
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utstrsuppt.c42
-rw-r--r--drivers/acpi/acpica/uttrack.c6
-rw-r--r--drivers/acpi/acpica/utxferror.c8
-rw-r--r--drivers/acpi/apei/erst.c2
-rw-r--r--drivers/acpi/apei/ghes.c79
-rw-r--r--drivers/acpi/battery.c36
-rw-r--r--drivers/acpi/button.c22
-rw-r--r--drivers/acpi/cppc_acpi.c2
-rw-r--r--drivers/acpi/device_pm.c29
-rw-r--r--drivers/acpi/ec.c2
-rw-r--r--drivers/acpi/ec_sys.c2
-rw-r--r--drivers/acpi/evged.c47
-rw-r--r--drivers/acpi/internal.h2
-rw-r--r--drivers/acpi/nfit/core.c9
-rw-r--r--drivers/acpi/numa.c3
-rw-r--r--drivers/acpi/pci_link.c2
-rw-r--r--drivers/acpi/pmic/intel_pmic_bxtwc.c9
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtdc_ti.c5
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtwc.c9
-rw-r--r--drivers/acpi/pmic/intel_pmic_crc.c7
-rw-r--r--drivers/acpi/pmic/intel_pmic_xpower.c7
-rw-r--r--drivers/acpi/processor_idle.c1
-rw-r--r--drivers/acpi/sleep.c16
-rw-r--r--drivers/acpi/sysfs.c26
-rw-r--r--drivers/acpi/utils.c41
-rw-r--r--drivers/android/binder.c44
-rw-r--r--drivers/ata/ahci_mtk.c6
-rw-r--r--drivers/ata/ahci_qoriq.c12
-rw-r--r--drivers/ata/libata-core.c13
-rw-r--r--drivers/ata/pata_pdc2027x.c16
-rw-r--r--drivers/base/Kconfig3
-rw-r--r--drivers/base/cacheinfo.c13
-rw-r--r--drivers/base/cpu.c48
-rw-r--r--drivers/base/power/domain.c69
-rw-r--r--drivers/base/power/main.c430
-rw-r--r--drivers/base/power/power.h11
-rw-r--r--drivers/base/power/runtime.c84
-rw-r--r--drivers/base/power/sysfs.c182
-rw-r--r--drivers/base/power/wakeirq.c8
-rw-r--r--drivers/base/power/wakeup.c26
-rw-r--r--drivers/base/regmap/Kconfig6
-rw-r--r--drivers/base/regmap/Makefile1
-rw-r--r--drivers/base/regmap/internal.h8
-rw-r--r--drivers/base/regmap/regcache-flat.c15
-rw-r--r--drivers/base/regmap/regmap-debugfs.c12
-rw-r--r--drivers/base/regmap/regmap-sdw.c88
-rw-r--r--drivers/base/regmap/regmap.c47
-rw-r--r--drivers/bcma/Kconfig2
-rw-r--r--drivers/block/DAC960.c160
-rw-r--r--drivers/block/Kconfig4
-rw-r--r--drivers/block/aoe/aoe.h3
-rw-r--r--drivers/block/aoe/aoecmd.c48
-rw-r--r--drivers/block/drbd/drbd_bitmap.c2
-rw-r--r--drivers/block/loop.c10
-rw-r--r--drivers/block/null_blk.c294
-rw-r--r--drivers/block/pktcdvd.c12
-rw-r--r--drivers/block/rbd.c18
-rw-r--r--drivers/block/smart1,2.h278
-rw-r--r--drivers/block/zram/zram_drv.c2
-rw-r--r--drivers/bus/Kconfig2
-rw-r--r--drivers/bus/sunxi-rsb.c1
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c44
-rw-r--r--drivers/char/ipmi/ipmi_si_parisc.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_pci.c7
-rw-r--r--drivers/clk/clk.c8
-rw-r--r--drivers/clk/sunxi/clk-sun9i-mmc.c12
-rw-r--r--drivers/clocksource/Kconfig8
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/owl-timer.c5
-rw-r--r--drivers/clocksource/tcb_clksrc.c2
-rw-r--r--drivers/clocksource/timer-of.c84
-rw-r--r--drivers/clocksource/timer-of.h1
-rw-r--r--drivers/clocksource/timer-sprd.c159
-rw-r--r--drivers/clocksource/timer-stm32.c358
-rw-r--r--drivers/cpufreq/Kconfig.arm88
-rw-r--r--drivers/cpufreq/Makefile9
-rw-r--r--drivers/cpufreq/arm_big_little.c23
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c241
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c8
-rw-r--r--drivers/cpufreq/cpufreq-dt.c27
-rw-r--r--drivers/cpufreq/cpufreq.c55
-rw-r--r--drivers/cpufreq/cpufreq_governor.c19
-rw-r--r--drivers/cpufreq/cpufreq_stats.c3
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c182
-rw-r--r--drivers/cpufreq/intel_pstate.c14
-rw-r--r--drivers/cpufreq/longhaul.c2
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c23
-rw-r--r--drivers/cpufreq/mvebu-cpufreq.c16
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c143
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c14
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c193
-rw-r--r--drivers/cpufreq/ti-cpufreq.c51
-rw-r--r--drivers/cpuidle/governor.c5
-rw-r--r--drivers/crypto/chelsio/Kconfig1
-rw-r--r--drivers/crypto/inside-secure/safexcel.c1
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c85
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c89
-rw-r--r--drivers/crypto/n2_core.c3
-rw-r--r--drivers/devfreq/devfreq.c5
-rw-r--r--drivers/dma/at_hdmac.c4
-rw-r--r--drivers/dma/dma-jz4740.c4
-rw-r--r--drivers/dma/dmatest.c55
-rw-r--r--drivers/dma/fsl-edma.c28
-rw-r--r--drivers/dma/ioat/dma.c2
-rw-r--r--drivers/dma/ioat/init.c2
-rw-r--r--drivers/dma/sh/rcar-dmac.c24
-rw-r--r--drivers/edac/Kconfig7
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/mv64x60_edac.c2
-rw-r--r--drivers/edac/octeon_edac-lmc.c1
-rw-r--r--drivers/edac/ti_edac.c341
-rw-r--r--drivers/extcon/extcon-axp288.c39
-rw-r--r--drivers/extcon/extcon-usbc-cros-ec.c142
-rw-r--r--drivers/firmware/efi/Kconfig10
-rw-r--r--drivers/firmware/efi/Makefile1
-rw-r--r--drivers/firmware/efi/capsule-loader.c47
-rw-r--r--drivers/firmware/efi/cper-arm.c356
-rw-r--r--drivers/firmware/efi/cper.c122
-rw-r--r--drivers/firmware/efi/efi.c2
-rw-r--r--drivers/firmware/psci_checker.c46
-rw-r--r--drivers/gpio/gpio-bcm-kona.c3
-rw-r--r--drivers/gpio/gpio-brcmstb.c4
-rw-r--r--drivers/gpio/gpio-merrifield.c11
-rw-r--r--drivers/gpio/gpio-mmio.c30
-rw-r--r--drivers/gpio/gpio-reg.c4
-rw-r--r--drivers/gpio/gpio-tegra.c4
-rw-r--r--drivers/gpio/gpio-xgene-sb.c2
-rw-r--r--drivers/gpio/gpiolib-acpi.c2
-rw-r--r--drivers/gpio/gpiolib-devprop.c17
-rw-r--r--drivers/gpio/gpiolib-of.c3
-rw-r--r--drivers/gpio/gpiolib.c62
-rw-r--r--drivers/gpio/gpiolib.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c13
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c51
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h2
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c47
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.h2
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c38
-rw-r--r--drivers/gpu/drm/drm_connector.c63
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h1
-rw-r--r--drivers/gpu/drm/drm_edid.c52
-rw-r--r--drivers/gpu/drm/drm_lease.c26
-rw-r--r--drivers/gpu/drm/drm_mm.c8
-rw-r--r--drivers/gpu/drm/drm_mode_config.c5
-rw-r--r--drivers/gpu/drm/drm_plane.c42
-rw-r--r--drivers/gpu/drm/drm_syncobj.c77
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c11
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h4
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c3
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c22
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c35
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c4
-rw-r--r--drivers/gpu/drm/i915/intel_display.c322
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c5
-rw-r--r--drivers/gpu/drm/i915/intel_lpe_audio.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c3
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c16
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c11
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c83
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c39
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vmm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c49
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c46
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c20
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c9
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c4
-rw-r--r--drivers/gpu/drm/tegra/sor.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c5
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c37
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c41
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-cp2112.c15
-rw-r--r--drivers/hid/hid-holtekff.c8
-rw-r--r--drivers/hv/vmbus_drv.c2
-rw-r--r--drivers/hwmon/Kconfig26
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c22
-rw-r--r--drivers/hwmon/coretemp.c3
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c54
-rw-r--r--drivers/hwmon/hih6130.c2
-rw-r--r--drivers/hwmon/hwmon.c33
-rw-r--r--drivers/hwmon/iio_hwmon.c3
-rw-r--r--drivers/hwmon/ina2xx.c90
-rw-r--r--drivers/hwmon/k10temp.c1
-rw-r--r--drivers/hwmon/lm75.c2
-rw-r--r--drivers/hwmon/pmbus/Kconfig1
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c288
-rw-r--r--drivers/hwmon/pmbus/ir35221.c189
-rw-r--r--drivers/hwmon/pmbus/lm25066.c67
-rw-r--r--drivers/hwmon/pmbus/max31785.c294
-rw-r--r--drivers/hwmon/pmbus/pmbus.h41
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c273
-rw-r--r--drivers/hwmon/sht15.c1
-rw-r--r--drivers/hwmon/sht21.c2
-rw-r--r--drivers/hwmon/sht3x.c7
-rw-r--r--drivers/hwmon/w83773g.c329
-rw-r--r--drivers/hwtracing/stm/ftrace.c6
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c39
-rw-r--r--drivers/i2c/busses/i2c-piix4.c2
-rw-r--r--drivers/i2c/busses/i2c-stm32.h3
-rw-r--r--drivers/i2c/busses/i2c-stm32f4.c3
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c3
-rw-r--r--drivers/i2c/i2c-core-base.c6
-rw-r--r--drivers/i2c/i2c-core-smbus.c13
-rw-r--r--drivers/iio/adc/Kconfig37
-rw-r--r--drivers/iio/adc/Makefile3
-rw-r--r--drivers/iio/adc/sd_adc_modulator.c68
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c1205
-rw-r--r--drivers/iio/adc/stm32-dfsdm-core.c302
-rw-r--r--drivers/iio/adc/stm32-dfsdm.h310
-rw-r--r--drivers/iio/buffer/Kconfig10
-rw-r--r--drivers/iio/buffer/Makefile1
-rw-r--r--drivers/iio/buffer/industrialio-buffer-cb.c11
-rw-r--r--drivers/iio/buffer/industrialio-hw-consumer.c247
-rw-r--r--drivers/iio/inkern.c17
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/cma.c2
-rw-r--r--drivers/infiniband/core/core_priv.h2
-rw-r--r--drivers/infiniband/core/device.c20
-rw-r--r--drivers/infiniband/core/iwcm.c2
-rw-r--r--drivers/infiniband/core/nldev.c56
-rw-r--r--drivers/infiniband/core/security.c10
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c10
-rw-r--r--drivers/infiniband/core/verbs.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c16
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c94
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h6
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c4
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h1
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c30
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c4
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c1
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c1
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c2
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c26
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c11
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c54
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h4
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c1
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c7
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h6
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c7
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c17
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c14
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c7
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c25
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c5
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c1
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c5
-rw-r--r--drivers/input/joystick/analog.c2
-rw-r--r--drivers/input/joystick/xpad.c19
-rw-r--r--drivers/input/misc/ims-pcu.c2
-rw-r--r--drivers/input/misc/twl4030-vibra.c6
-rw-r--r--drivers/input/misc/twl6040-vibra.c3
-rw-r--r--drivers/input/misc/xen-kbdfront.c2
-rw-r--r--drivers/input/mouse/alps.c23
-rw-r--r--drivers/input/mouse/alps.h10
-rw-r--r--drivers/input/mouse/elantech.c2
-rw-r--r--drivers/input/mouse/synaptics.c1
-rw-r--r--drivers/input/mouse/trackpoint.c245
-rw-r--r--drivers/input/mouse/trackpoint.h34
-rw-r--r--drivers/input/rmi4/rmi_driver.c4
-rw-r--r--drivers/input/rmi4/rmi_f01.c12
-rw-r--r--drivers/input/touchscreen/88pm860x-ts.c16
-rw-r--r--drivers/input/touchscreen/elants_i2c.c10
-rw-r--r--drivers/input/touchscreen/hideep.c3
-rw-r--r--drivers/input/touchscreen/of_touchscreen.c4
-rw-r--r--drivers/input/touchscreen/s6sy761.c15
-rw-r--r--drivers/input/touchscreen/stmfts.c15
-rw-r--r--drivers/iommu/amd_iommu.c2
-rw-r--r--drivers/iommu/arm-smmu-v3.c17
-rw-r--r--drivers/iommu/intel_irq_remapping.c2
-rw-r--r--drivers/irqchip/Kconfig8
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-bcm2836.c46
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c4
-rw-r--r--drivers/irqchip/irq-gic-v3.c11
-rw-r--r--drivers/irqchip/irq-goldfish-pic.c139
-rw-r--r--drivers/irqchip/irq-ompic.c4
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c6
-rw-r--r--drivers/leds/led-core.c1
-rw-r--r--drivers/leds/leds-pm8058.c2
-rw-r--r--drivers/lightnvm/Kconfig7
-rw-r--r--drivers/lightnvm/Makefile1
-rw-r--r--drivers/lightnvm/core.c462
-rw-r--r--drivers/lightnvm/pblk-cache.c5
-rw-r--r--drivers/lightnvm/pblk-core.c55
-rw-r--r--drivers/lightnvm/pblk-gc.c23
-rw-r--r--drivers/lightnvm/pblk-init.c104
-rw-r--r--drivers/lightnvm/pblk-map.c2
-rw-r--r--drivers/lightnvm/pblk-rb.c111
-rw-r--r--drivers/lightnvm/pblk-read.c35
-rw-r--r--drivers/lightnvm/pblk-recovery.c43
-rw-r--r--drivers/lightnvm/pblk-rl.c54
-rw-r--r--drivers/lightnvm/pblk-sysfs.c15
-rw-r--r--drivers/lightnvm/pblk-write.c23
-rw-r--r--drivers/lightnvm/pblk.h163
-rw-r--r--drivers/lightnvm/rrpc.c1625
-rw-r--r--drivers/lightnvm/rrpc.h290
-rw-r--r--drivers/md/bcache/alloc.c19
-rw-r--r--drivers/md/bcache/bcache.h24
-rw-r--r--drivers/md/bcache/btree.c10
-rw-r--r--drivers/md/bcache/closure.c47
-rw-r--r--drivers/md/bcache/closure.h60
-rw-r--r--drivers/md/bcache/debug.c7
-rw-r--r--drivers/md/bcache/io.c13
-rw-r--r--drivers/md/bcache/movinggc.c2
-rw-r--r--drivers/md/bcache/request.c29
-rw-r--r--drivers/md/bcache/super.c27
-rw-r--r--drivers/md/bcache/util.c34
-rw-r--r--drivers/md/bcache/util.h1
-rw-r--r--drivers/md/bcache/writeback.c203
-rw-r--r--drivers/md/bcache/writeback.h12
-rw-r--r--drivers/md/dm-bufio.c8
-rw-r--r--drivers/md/dm-cache-target.c12
-rw-r--r--drivers/md/dm-crypt.c21
-rw-r--r--drivers/md/dm-integrity.c49
-rw-r--r--drivers/md/dm-mpath.c86
-rw-r--r--drivers/md/dm-rq.c28
-rw-r--r--drivers/md/dm-snap.c48
-rw-r--r--drivers/md/dm-table.c5
-rw-r--r--drivers/md/dm-thin-metadata.c6
-rw-r--r--drivers/md/dm-thin.c22
-rw-r--r--drivers/md/dm.c21
-rw-r--r--drivers/md/persistent-data/dm-btree.c19
-rw-r--r--drivers/memory/omap-gpmc.c163
-rw-r--r--drivers/memstick/host/Kconfig4
-rw-r--r--drivers/memstick/host/rtsx_pci_ms.c2
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c2
-rw-r--r--drivers/mfd/Kconfig41
-rw-r--r--drivers/mfd/Makefile7
-rw-r--r--drivers/mfd/ab8500-debugfs.c439
-rw-r--r--drivers/mfd/arizona-irq.c4
-rw-r--r--drivers/mfd/atmel-flexcom.c63
-rw-r--r--drivers/mfd/axp20x.c4
-rw-r--r--drivers/mfd/cros_ec.c4
-rw-r--r--drivers/mfd/cros_ec_dev.c (renamed from drivers/platform/chrome/cros_ec_dev.c)8
-rw-r--r--drivers/mfd/cros_ec_dev.h (renamed from drivers/platform/chrome/cros_ec_dev.h)0
-rw-r--r--drivers/mfd/cros_ec_spi.c78
-rw-r--r--drivers/mfd/intel-lpss.c6
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c1
-rw-r--r--drivers/mfd/kempld-core.c2
-rw-r--r--drivers/mfd/lpc_ich.c5
-rw-r--r--drivers/mfd/max77843.c1
-rw-r--r--drivers/mfd/palmas.c10
-rw-r--r--drivers/mfd/pcf50633-core.c2
-rw-r--r--drivers/mfd/rave-sp.c710
-rw-r--r--drivers/mfd/stm32-lptimer.c6
-rw-r--r--drivers/mfd/stm32-timers.c4
-rw-r--r--drivers/mfd/syscon.c19
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c2
-rw-r--r--drivers/mfd/tmio_core.c20
-rw-r--r--drivers/mfd/twl4030-audio.c9
-rw-r--r--drivers/mfd/twl6040.c12
-rw-r--r--drivers/misc/Kconfig5
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/cardreader/Kconfig20
-rw-r--r--drivers/misc/cardreader/Makefile4
-rw-r--r--drivers/misc/cardreader/rtl8411.c (renamed from drivers/mfd/rtl8411.c)2
-rw-r--r--drivers/misc/cardreader/rts5209.c (renamed from drivers/mfd/rts5209.c)2
-rw-r--r--drivers/misc/cardreader/rts5227.c (renamed from drivers/mfd/rts5227.c)2
-rw-r--r--drivers/misc/cardreader/rts5229.c (renamed from drivers/mfd/rts5229.c)2
-rw-r--r--drivers/misc/cardreader/rts5249.c (renamed from drivers/mfd/rts5249.c)3
-rw-r--r--drivers/misc/cardreader/rts5260.c748
-rw-r--r--drivers/misc/cardreader/rts5260.h45
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c (renamed from drivers/mfd/rtsx_pcr.c)128
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h (renamed from drivers/mfd/rtsx_pcr.h)12
-rw-r--r--drivers/misc/cardreader/rtsx_usb.c (renamed from drivers/mfd/rtsx_usb.c)2
-rw-r--r--drivers/misc/eeprom/at24.c26
-rw-r--r--drivers/misc/pti.c2
-rw-r--r--drivers/mmc/core/block.c1385
-rw-r--r--drivers/mmc/core/block.h12
-rw-r--r--drivers/mmc/core/bus.c2
-rw-r--r--drivers/mmc/core/card.h2
-rw-r--r--drivers/mmc/core/core.c228
-rw-r--r--drivers/mmc/core/core.h47
-rw-r--r--drivers/mmc/core/host.h6
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/core/mmc_test.c221
-rw-r--r--drivers/mmc/core/queue.c504
-rw-r--r--drivers/mmc/core/queue.h64
-rw-r--r--drivers/mmc/core/quirks.h8
-rw-r--r--drivers/mmc/core/slot-gpio.c22
-rw-r--r--drivers/mmc/host/Kconfig26
-rw-r--r--drivers/mmc/host/Makefile11
-rw-r--r--drivers/mmc/host/android-goldfish.c2
-rw-r--r--drivers/mmc/host/cqhci.c1150
-rw-r--r--drivers/mmc/host/cqhci.h240
-rw-r--r--drivers/mmc/host/davinci_mmc.c17
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c2
-rw-r--r--drivers/mmc/host/mmci.c127
-rw-r--r--drivers/mmc/host/mmci.h6
-rw-r--r--drivers/mmc/host/renesas_sdhi.h22
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c52
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c15
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c37
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c2
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c2
-rw-r--r--drivers/mmc/host/s3cmci.c8
-rw-r--r--drivers/mmc/host/sdhci-acpi.c132
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c36
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c149
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c8
-rw-r--r--drivers/mmc/host/sdhci-pci-arasan.c331
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c218
-rw-r--r--drivers/mmc/host/sdhci-pci.h7
-rw-r--r--drivers/mmc/host/sdhci-spear.c4
-rw-r--r--drivers/mmc/host/sdhci-xenon.c7
-rw-r--r--drivers/mmc/host/sdhci.c57
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c52
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mmc/host/sunxi-mmc.c9
-rw-r--r--drivers/mmc/host/tmio_mmc.c23
-rw-r--r--drivers/mmc/host/tmio_mmc.h43
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c152
-rw-r--r--drivers/mtd/devices/docg3.c70
-rw-r--r--drivers/mtd/devices/m25p80.c9
-rw-r--r--drivers/mtd/devices/mchp23k256.c18
-rw-r--r--drivers/mtd/mtdcore.c38
-rw-r--r--drivers/mtd/mtdpart.c43
-rw-r--r--drivers/mtd/mtdswap.c5
-rw-r--r--drivers/mtd/nand/Kconfig17
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/atmel/nand-controller.c9
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c6
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c40
-rw-r--r--drivers/mtd/nand/cafe_nand.c52
-rw-r--r--drivers/mtd/nand/denali.c84
-rw-r--r--drivers/mtd/nand/denali.h4
-rw-r--r--drivers/mtd/nand/denali_pci.c4
-rw-r--r--drivers/mtd/nand/diskonchip.c4
-rw-r--r--drivers/mtd/nand/docg4.c21
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c10
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c13
-rw-r--r--drivers/mtd/nand/fsmc_nand.c9
-rw-r--r--drivers/mtd/nand/gpio.c6
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c117
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h46
-rw-r--r--drivers/mtd/nand/hisi504_nand.c9
-rw-r--r--drivers/mtd/nand/jz4740_nand.c16
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c7
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c33
-rw-r--r--drivers/mtd/nand/marvell_nand.c2896
-rw-r--r--drivers/mtd/nand/mtk_ecc.c126
-rw-r--r--drivers/mtd/nand/mtk_ecc.h3
-rw-r--r--drivers/mtd/nand/mtk_nand.c76
-rw-r--r--drivers/mtd/nand/nand_base.c2309
-rw-r--r--drivers/mtd/nand/nand_bbt.c2
-rw-r--r--drivers/mtd/nand/nand_hynix.c129
-rw-r--r--drivers/mtd/nand/nand_micron.c83
-rw-r--r--drivers/mtd/nand/nand_samsung.c19
-rw-r--r--drivers/mtd/nand/nand_timings.c21
-rw-r--r--drivers/mtd/nand/omap2.c28
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c15
-rw-r--r--drivers/mtd/nand/qcom_nandc.c30
-rw-r--r--drivers/mtd/nand/r852.c11
-rw-r--r--drivers/mtd/nand/sh_flctl.c6
-rw-r--r--drivers/mtd/nand/sm_common.h2
-rw-r--r--drivers/mtd/nand/sunxi_nand.c111
-rw-r--r--drivers/mtd/nand/tango_nand.c27
-rw-r--r--drivers/mtd/nand/tmio_nand.c5
-rw-r--r--drivers/mtd/nand/vf610_nfc.c6
-rw-r--r--drivers/mtd/onenand/Kconfig7
-rw-r--r--drivers/mtd/onenand/omap2.c577
-rw-r--r--drivers/mtd/onenand/onenand_base.c81
-rw-r--r--drivers/mtd/onenand/samsung.c185
-rw-r--r--drivers/mtd/parsers/sharpslpart.c6
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c55
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c8
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c6
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c240
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c75
-rw-r--r--drivers/mtd/tests/nandbiterrs.c2
-rw-r--r--drivers/mtd/tests/oobtest.c21
-rw-r--r--drivers/mtd/ubi/block.c42
-rw-r--r--drivers/mtd/ubi/build.c13
-rw-r--r--drivers/mtd/ubi/eba.c2
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c2
-rw-r--r--drivers/mtd/ubi/fastmap.c5
-rw-r--r--drivers/mtd/ubi/vmt.c15
-rw-r--r--drivers/mtd/ubi/wl.c87
-rw-r--r--drivers/mtd/ubi/wl.h2
-rw-r--r--drivers/mux/core.c4
-rw-r--r--drivers/net/can/flexcan.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/usb/gs_usb.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c21
-rw-r--r--drivers/net/can/vxcan.c2
-rw-r--r--drivers/net/dsa/b53/b53_common.c9
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c1
-rw-r--r--drivers/net/ethernet/3com/3c59x.c90
-rw-r--r--drivers/net/ethernet/8390/mac8390.c33
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c45
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c16
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h29
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c82
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c17
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c29
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c80
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/ver.h6
-rw-r--r--drivers/net/ethernet/arc/emac.h2
-rw-r--r--drivers/net/ethernet/arc/emac_main.c164
-rw-r--r--drivers/net/ethernet/arc/emac_rockchip.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c21
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c17
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c4
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c9
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c13
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c16
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h1
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c3
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c6
-rw-r--r--drivers/net/ethernet/ibm/emac/emac.h4
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c110
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h3
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c27
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c11
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c39
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c26
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c26
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c8
-rw-r--r--drivers/net/ethernet/marvell/skge.c1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c111
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c103
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.c64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c6
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c38
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c55
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c12
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-phy.c7
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c6
-rw-r--r--drivers/net/ethernet/realtek/r8169.c9
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c27
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c45
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c8
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/geneve.c14
-rw-r--r--drivers/net/hippi/rrunner.c2
-rw-r--r--drivers/net/macvlan.c7
-rw-r--r--drivers/net/phy/at803x.c4
-rw-r--r--drivers/net/phy/marvell.c18
-rw-r--r--drivers/net/phy/mdio-sun4i.c6
-rw-r--r--drivers/net/phy/mdio-xgene.c21
-rw-r--r--drivers/net/phy/mdio_bus.c1
-rw-r--r--drivers/net/phy/meson-gxl.c74
-rw-r--r--drivers/net/phy/micrel.c1
-rw-r--r--drivers/net/phy/phy.c9
-rw-r--r--drivers/net/phy/phy_device.c10
-rw-r--r--drivers/net/phy/phylink.c7
-rw-r--r--drivers/net/phy/sfp-bus.c6
-rw-r--r--drivers/net/ppp/ppp_generic.c5
-rw-r--r--drivers/net/ppp/pppoe.c11
-rw-r--r--drivers/net/tun.c15
-rw-r--r--drivers/net/usb/lan78xx.c1
-rw-r--r--drivers/net/usb/qmi_wwan.c3
-rw-r--r--drivers/net/usb/r8152.c13
-rw-r--r--drivers/net/usb/usbnet.c8
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/vrf.c5
-rw-r--r--drivers/net/vxlan.c17
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c23
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c8
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c20
-rw-r--r--drivers/net/xen-netfront.c1
-rw-r--r--drivers/nubus/Makefile2
-rw-r--r--drivers/nubus/bus.c117
-rw-r--r--drivers/nubus/nubus.c542
-rw-r--r--drivers/nubus/proc.c281
-rw-r--r--drivers/nvdimm/btt.c201
-rw-r--r--drivers/nvdimm/btt.h47
-rw-r--r--drivers/nvdimm/pfn_devs.c20
-rw-r--r--drivers/nvme/host/Makefile4
-rw-r--r--drivers/nvme/host/core.c152
-rw-r--r--drivers/nvme/host/fabrics.c23
-rw-r--r--drivers/nvme/host/fabrics.h2
-rw-r--r--drivers/nvme/host/fc.c8
-rw-r--r--drivers/nvme/host/lightnvm.c185
-rw-r--r--drivers/nvme/host/multipath.c44
-rw-r--r--drivers/nvme/host/nvme.h21
-rw-r--r--drivers/nvme/host/pci.c282
-rw-r--r--drivers/nvme/host/rdma.c20
-rw-r--r--drivers/nvme/host/trace.c130
-rw-r--r--drivers/nvme/host/trace.h165
-rw-r--r--drivers/nvme/target/Kconfig2
-rw-r--r--drivers/nvme/target/core.c14
-rw-r--r--drivers/nvme/target/fabrics-cmd.c2
-rw-r--r--drivers/nvme/target/fc.c60
-rw-r--r--drivers/nvme/target/fcloop.c246
-rw-r--r--drivers/nvme/target/loop.c3
-rw-r--r--drivers/nvme/target/rdma.c83
-rw-r--r--drivers/nvmem/meson-mx-efuse.c4
-rw-r--r--drivers/of/of_mdio.c12
-rw-r--r--drivers/opp/Makefile1
-rw-r--r--drivers/opp/ti-opp-supply.c425
-rw-r--r--drivers/parisc/dino.c10
-rw-r--r--drivers/parisc/eisa_eeprom.c2
-rw-r--r--drivers/parisc/lba_pci.c33
-rw-r--r--drivers/pci/host/pci-hyperv.c8
-rw-r--r--drivers/pci/host/pcie-rcar.c8
-rw-r--r--drivers/pci/pci-driver.c30
-rw-r--r--drivers/pci/pcie/portdrv_pci.c3
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c2
-rw-r--r--drivers/phy/phy-core.c4
-rw-r--r--drivers/phy/renesas/Kconfig2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c2
-rw-r--r--drivers/phy/tegra/xusb.c58
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c16
-rw-r--r--drivers/pinctrl/pinctrl-single.c5
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c2
-rw-r--r--drivers/platform/chrome/Kconfig10
-rw-r--r--drivers/platform/chrome/Makefile7
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c5
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.h27
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c6
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c5
-rw-r--r--drivers/platform/chrome/cros_ec_vbc.c1
-rw-r--r--drivers/platform/x86/asus-wireless.c1
-rw-r--r--drivers/platform/x86/dell-laptop.c17
-rw-r--r--drivers/platform/x86/dell-wmi.c2
-rw-r--r--drivers/platform/x86/surfacepro3_button.c4
-rw-r--r--drivers/platform/x86/wmi.c2
-rw-r--r--drivers/pnp/pnpbios/core.c5
-rw-r--r--drivers/pnp/quirks.c1
-rw-r--r--drivers/power/avs/rockchip-io-domain.c24
-rw-r--r--drivers/powercap/intel_rapl.c99
-rw-r--r--drivers/powercap/powercap_sys.c6
-rw-r--r--drivers/regulator/Kconfig7
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/core.c383
-rw-r--r--drivers/regulator/internal.h27
-rw-r--r--drivers/regulator/of_regulator.c34
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c84
-rw-r--r--drivers/regulator/sc2731-regulator.c256
-rw-r--r--drivers/regulator/tps65218-regulator.c5
-rw-r--r--drivers/s390/block/dasd_3990_erp.c10
-rw-r--r--drivers/s390/char/Makefile2
-rw-r--r--drivers/s390/net/qeth_core.h6
-rw-r--r--drivers/s390/net/qeth_core_main.c15
-rw-r--r--drivers/s390/net/qeth_l3.h2
-rw-r--r--drivers/s390/net/qeth_l3_main.c36
-rw-r--r--drivers/s390/net/qeth_l3_sys.c75
-rw-r--r--drivers/scsi/aacraid/aacraid.h1
-rw-r--r--drivers/scsi/aacraid/commsup.c8
-rw-r--r--drivers/scsi/aacraid/linit.c2
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c6
-rw-r--r--drivers/scsi/bfa/bfad_im.c6
-rw-r--r--drivers/scsi/bfa/bfad_im.h10
-rw-r--r--drivers/scsi/libfc/fc_lport.c4
-rw-r--r--drivers/scsi/libsas/sas_expander.c10
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c2
-rw-r--r--drivers/scsi/osd/osd_initiator.c4
-rw-r--r--drivers/scsi/scsi_debugfs.c6
-rw-r--r--drivers/scsi/scsi_devinfo.c33
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_scan.c13
-rw-r--r--drivers/scsi/scsi_sysfs.c10
-rw-r--r--drivers/scsi/scsi_transport_spi.c28
-rw-r--r--drivers/scsi/sd.c4
-rw-r--r--drivers/scsi/storvsc_drv.c3
-rw-r--r--drivers/spi/spi-armada-3700.c118
-rw-r--r--drivers/spi/spi-atmel.c115
-rw-r--r--drivers/spi/spi-bcm53xx.c26
-rw-r--r--drivers/spi/spi-davinci.c4
-rw-r--r--drivers/spi/spi-dw.c2
-rw-r--r--drivers/spi/spi-fsl-dspi.c9
-rw-r--r--drivers/spi/spi-imx.c26
-rw-r--r--drivers/spi/spi-jcore.c4
-rw-r--r--drivers/spi/spi-meson-spicc.c1
-rw-r--r--drivers/spi/spi-orion.c21
-rw-r--r--drivers/spi/spi-pxa2xx.c4
-rw-r--r--drivers/spi/spi-rspi.c4
-rw-r--r--drivers/spi/spi-s3c64xx.c18
-rw-r--r--drivers/spi/spi-sh-msiof.c128
-rw-r--r--drivers/spi/spi-sirf.c4
-rw-r--r--drivers/spi/spi-sun4i.c2
-rw-r--r--drivers/spi/spi-sun6i.c2
-rw-r--r--drivers/spi/spi-xilinx.c12
-rw-r--r--drivers/ssb/Kconfig2
-rw-r--r--drivers/staging/android/ashmem.c2
-rw-r--r--drivers/staging/android/ion/Kconfig2
-rw-r--r--drivers/staging/android/ion/ion.c4
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c15
-rw-r--r--drivers/staging/ccree/ssi_hash.c2
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c23
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c3
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c5
-rw-r--r--drivers/staging/pi433/rf69.c2
-rw-r--r--drivers/target/Kconfig1
-rw-r--r--drivers/target/target_core_pscsi.c4
-rw-r--r--drivers/target/target_core_transport.c46
-rw-r--r--drivers/thermal/cpu_cooling.c201
-rw-r--r--drivers/thunderbolt/nhi.c2
-rw-r--r--drivers/tty/n_tty.c4
-rw-r--r--drivers/tty/serdev/core.c31
-rw-r--r--drivers/usb/chipidea/ci_hdrc_msm.c2
-rw-r--r--drivers/usb/core/config.c6
-rw-r--r--drivers/usb/core/quirks.c6
-rw-r--r--drivers/usb/dwc2/core.h4
-rw-r--r--drivers/usb/dwc2/gadget.c42
-rw-r--r--drivers/usb/dwc2/params.c29
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c5
-rw-r--r--drivers/usb/dwc3/gadget.c4
-rw-r--r--drivers/usb/gadget/Kconfig4
-rw-r--r--drivers/usb/gadget/function/f_ncm.c30
-rw-r--r--drivers/usb/gadget/legacy/Kconfig12
-rw-r--r--drivers/usb/gadget/udc/core.c28
-rw-r--r--drivers/usb/host/xhci-debugfs.c16
-rw-r--r--drivers/usb/host/xhci-mem.c15
-rw-r--r--drivers/usb/host/xhci-pci.c3
-rw-r--r--drivers/usb/host/xhci-ring.c6
-rw-r--r--drivers/usb/host/xhci.c6
-rw-r--r--drivers/usb/misc/usb3503.c2
-rw-r--r--drivers/usb/mon/mon_bin.c8
-rw-r--r--drivers/usb/musb/da8xx.c10
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/option.c17
-rw-r--r--drivers/usb/serial/qcserial.c3
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/storage/unusual_uas.h14
-rw-r--r--drivers/usb/usbip/stub_dev.c3
-rw-r--r--drivers/usb/usbip/stub_main.c5
-rw-r--r--drivers/usb/usbip/stub_rx.c47
-rw-r--r--drivers/usb/usbip/stub_tx.c13
-rw-r--r--drivers/usb/usbip/usbip_common.c33
-rw-r--r--drivers/usb/usbip/usbip_common.h1
-rw-r--r--drivers/usb/usbip/vhci_hcd.c12
-rw-r--r--drivers/usb/usbip/vhci_rx.c23
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c25
-rw-r--r--drivers/usb/usbip/vhci_tx.c3
-rw-r--r--drivers/usb/usbip/vudc_rx.c19
-rw-r--r--drivers/usb/usbip/vudc_tx.c11
-rw-r--r--drivers/vhost/vhost.c13
-rw-r--r--drivers/video/backlight/apple_bl.c2
-rw-r--r--drivers/video/backlight/corgi_lcd.c2
-rw-r--r--drivers/video/backlight/tdo24m.c2
-rw-r--r--drivers/video/backlight/tosa_lcd.c2
-rw-r--r--drivers/video/fbdev/macfb.c10
-rw-r--r--drivers/virtio/virtio_mmio.c43
-rw-r--r--drivers/watchdog/Kconfig7
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/rave-sp-wdt.c337
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--drivers/xen/balloon.c65
-rw-r--r--drivers/xen/gntdev.c8
-rw-r--r--drivers/xen/pvcalls-front.c2
899 files changed, 29339 insertions, 12956 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 4650539..d650c5b 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -361,22 +361,6 @@ config ACPI_PCI_SLOT
i.e., segment/bus/device/function tuples, with physical slots in
the system. If you are unsure, say N.
-config X86_PM_TIMER
- bool "Power Management Timer Support" if EXPERT
- depends on X86
- default y
- help
- The Power Management Timer is available on all ACPI-capable,
- in most cases even if ACPI is unusable or blacklisted.
-
- This timing source is not affected by power management features
- like aggressive processor idling, throttling, frequency and/or
- voltage scaling, unlike the commonly used Time Stamp Counter
- (TSC) timing source.
-
- You should nearly always say Y here because many modern
- systems require this timer.
-
config ACPI_CONTAINER
bool "Container and Module Devices"
default (ACPI_HOTPLUG_MEMORY || ACPI_HOTPLUG_CPU)
@@ -564,3 +548,19 @@ config TPS68470_PMIC_OPREGION
using this, are probed.
endif # ACPI
+
+config X86_PM_TIMER
+ bool "Power Management Timer Support" if EXPERT
+ depends on X86 && (ACPI || JAILHOUSE_GUEST)
+ default y
+ help
+ The Power Management Timer is available on all ACPI-capable,
+ in most cases even if ACPI is unusable or blacklisted.
+
+ This timing source is not affected by power management features
+ like aggressive processor idling, throttling, frequency and/or
+ voltage scaling, unlike the commonly used Time Stamp Counter
+ (TSC) timing source.
+
+ You should nearly always say Y here because many modern
+ systems require this timer.
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 7f2b02c..2bcffec 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -427,6 +427,142 @@ out:
return 0;
}
+struct lpss_device_links {
+ const char *supplier_hid;
+ const char *supplier_uid;
+ const char *consumer_hid;
+ const char *consumer_uid;
+ u32 flags;
+};
+
+/*
+ * The _DEP method is used to identify dependencies but instead of creating
+ * device links for every handle in _DEP, only links in the following list are
+ * created. That is necessary because, in the general case, _DEP can refer to
+ * devices that might not have drivers, or that are on different buses, or where
+ * the supplier is not enumerated until after the consumer is probed.
+ */
+static const struct lpss_device_links lpss_device_links[] = {
+ {"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
+};
+
+static bool hid_uid_match(const char *hid1, const char *uid1,
+ const char *hid2, const char *uid2)
+{
+ return !strcmp(hid1, hid2) && uid1 && uid2 && !strcmp(uid1, uid2);
+}
+
+static bool acpi_lpss_is_supplier(struct acpi_device *adev,
+ const struct lpss_device_links *link)
+{
+ return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
+ link->supplier_hid, link->supplier_uid);
+}
+
+static bool acpi_lpss_is_consumer(struct acpi_device *adev,
+ const struct lpss_device_links *link)
+{
+ return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
+ link->consumer_hid, link->consumer_uid);
+}
+
+struct hid_uid {
+ const char *hid;
+ const char *uid;
+};
+
+static int match_hid_uid(struct device *dev, void *data)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct hid_uid *id = data;
+
+ if (!adev)
+ return 0;
+
+ return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
+ id->hid, id->uid);
+}
+
+static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
+{
+ struct hid_uid data = {
+ .hid = hid,
+ .uid = uid,
+ };
+
+ return bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
+}
+
+static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
+{
+ struct acpi_handle_list dep_devices;
+ acpi_status status;
+ int i;
+
+ if (!acpi_has_method(adev->handle, "_DEP"))
+ return false;
+
+ status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
+ &dep_devices);
+ if (ACPI_FAILURE(status)) {
+ dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
+ return false;
+ }
+
+ for (i = 0; i < dep_devices.count; i++) {
+ if (dep_devices.handles[i] == handle)
+ return true;
+ }
+
+ return false;
+}
+
+static void acpi_lpss_link_consumer(struct device *dev1,
+ const struct lpss_device_links *link)
+{
+ struct device *dev2;
+
+ dev2 = acpi_lpss_find_device(link->consumer_hid, link->consumer_uid);
+ if (!dev2)
+ return;
+
+ if (acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
+ device_link_add(dev2, dev1, link->flags);
+
+ put_device(dev2);
+}
+
+static void acpi_lpss_link_supplier(struct device *dev1,
+ const struct lpss_device_links *link)
+{
+ struct device *dev2;
+
+ dev2 = acpi_lpss_find_device(link->supplier_hid, link->supplier_uid);
+ if (!dev2)
+ return;
+
+ if (acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
+ device_link_add(dev1, dev2, link->flags);
+
+ put_device(dev2);
+}
+
+static void acpi_lpss_create_device_links(struct acpi_device *adev,
+ struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lpss_device_links); i++) {
+ const struct lpss_device_links *link = &lpss_device_links[i];
+
+ if (acpi_lpss_is_supplier(adev, link))
+ acpi_lpss_link_consumer(&pdev->dev, link);
+
+ if (acpi_lpss_is_consumer(adev, link))
+ acpi_lpss_link_supplier(&pdev->dev, link);
+ }
+}
+
static int acpi_lpss_create_device(struct acpi_device *adev,
const struct acpi_device_id *id)
{
@@ -465,6 +601,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
acpi_dev_free_resource_list(&resource_list);
if (!pdata->mmio_base) {
+ /* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
+ adev->pnp.type.platform_id = 0;
/* Skip the device, but continue the namespace scan. */
ret = 0;
goto err_out;
@@ -500,6 +638,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
adev->driver_data = pdata;
pdev = acpi_create_platform_device(adev, dev_desc->properties);
if (!IS_ERR_OR_NULL(pdev)) {
+ acpi_lpss_create_device_links(adev, pdev);
return 1;
}
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 0972ec0..f53ccc6 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -80,8 +80,8 @@ MODULE_PARM_DESC(report_key_events,
static bool device_id_scheme = false;
module_param(device_id_scheme, bool, 0444);
-static bool only_lcd = false;
-module_param(only_lcd, bool, 0444);
+static int only_lcd = -1;
+module_param(only_lcd, int, 0444);
static int register_count;
static DEFINE_MUTEX(register_count_mutex);
@@ -2136,6 +2136,16 @@ int acpi_video_register(void)
goto leave;
}
+ /*
+ * We're seeing a lot of bogus backlight interfaces on newer machines
+ * without a LCD such as desktops, servers and HDMI sticks. Checking
+ * the lcd flag fixes this, so enable this on any machines which are
+ * win8 ready (where we also prefer the native backlight driver, so
+ * normally the acpi_video code should not register there anyways).
+ */
+ if (only_lcd == -1)
+ only_lcd = acpi_osi_is_win8();
+
dmi_check_system(video_dmi_table);
ret = acpi_bus_register_driver(&acpi_video_bus);
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 7a1a68b..2243c81 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -80,6 +80,9 @@
prefix, ACPICA_COPYRIGHT, \
prefix
+#define ACPI_COMMON_BUILD_TIME \
+ "Build date/time: %s %s\n", __DATE__, __TIME__
+
/* Macros for usage messages */
#define ACPI_USAGE_HEADER(usage) \
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 71743e5..54b8d9d 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -223,6 +223,10 @@ void
acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags);
void
+acpi_db_create_execution_thread(char *method_name_arg,
+ char **arguments, acpi_object_type *types);
+
+void
acpi_db_create_execution_threads(char *num_threads_arg,
char *num_loops_arg, char *method_name_arg);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 95eed44..45ef3f5 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -46,7 +46,7 @@
/*****************************************************************************
*
- * Globals related to the ACPI tables
+ * Globals related to the incoming ACPI tables
*
****************************************************************************/
@@ -87,7 +87,7 @@ ACPI_GLOBAL(u8, acpi_gbl_integer_nybble_width);
/*****************************************************************************
*
- * Mutual exclusion within ACPICA subsystem
+ * Mutual exclusion within the ACPICA subsystem
*
****************************************************************************/
@@ -167,7 +167,7 @@ ACPI_GLOBAL(u8, acpi_gbl_next_owner_id_offset);
ACPI_INIT_GLOBAL(u8, acpi_gbl_namespace_initialized, FALSE);
-/* Misc */
+/* Miscellaneous */
ACPI_GLOBAL(u32, acpi_gbl_original_mode);
ACPI_GLOBAL(u32, acpi_gbl_ns_lookup_count);
@@ -191,10 +191,9 @@ extern const char acpi_gbl_lower_hex_digits[];
extern const char acpi_gbl_upper_hex_digits[];
extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-
/* Lists for tracking memory allocations (debug only) */
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_global_list);
ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_ns_node_list);
ACPI_GLOBAL(u8, acpi_gbl_display_final_mem_stats);
@@ -203,7 +202,7 @@ ACPI_GLOBAL(u8, acpi_gbl_disable_mem_tracking);
/*****************************************************************************
*
- * Namespace globals
+ * ACPI Namespace
*
****************************************************************************/
@@ -234,15 +233,20 @@ ACPI_INIT_GLOBAL(u32, acpi_gbl_nesting_level, 0);
/*****************************************************************************
*
- * Interpreter globals
+ * Interpreter/Parser globals
*
****************************************************************************/
-ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
-
/* Control method single step flag */
ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
+ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
+ACPI_INIT_GLOBAL(union acpi_parse_object, *acpi_gbl_current_scope, NULL);
+
+/* ASL/ASL+ converter */
+
+ACPI_INIT_GLOBAL(u8, acpi_gbl_capture_comments, FALSE);
+ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_last_list_head, NULL);
/*****************************************************************************
*
@@ -252,7 +256,6 @@ ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
extern struct acpi_bit_register_info
acpi_gbl_bit_register_info[ACPI_NUM_BITREG];
-
ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a);
ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
@@ -263,7 +266,6 @@ ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
****************************************************************************/
#if (!ACPI_REDUCED_HARDWARE)
-
ACPI_GLOBAL(u8, acpi_gbl_all_gpes_initialized);
ACPI_GLOBAL(struct acpi_gpe_xrupt_info *, acpi_gbl_gpe_xrupt_list_head);
ACPI_GLOBAL(struct acpi_gpe_block_info *,
@@ -272,10 +274,8 @@ ACPI_GLOBAL(acpi_gbl_event_handler, acpi_gbl_global_event_handler);
ACPI_GLOBAL(void *, acpi_gbl_global_event_handler_context);
ACPI_GLOBAL(struct acpi_fixed_event_handler,
acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS]);
-
extern struct acpi_fixed_event_info
acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
-
#endif /* !ACPI_REDUCED_HARDWARE */
/*****************************************************************************
@@ -291,14 +291,14 @@ ACPI_GLOBAL(u32, acpi_gpe_count);
ACPI_GLOBAL(u32, acpi_sci_count);
ACPI_GLOBAL(u32, acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]);
-/* Support for dynamic control method tracing mechanism */
+/* Dynamic control method tracing mechanism */
ACPI_GLOBAL(u32, acpi_gbl_original_dbg_level);
ACPI_GLOBAL(u32, acpi_gbl_original_dbg_layer);
/*****************************************************************************
*
- * Debugger and Disassembler globals
+ * Debugger and Disassembler
*
****************************************************************************/
@@ -326,7 +326,6 @@ ACPI_GLOBAL(struct acpi_external_file *, acpi_gbl_external_file_list);
#endif
#ifdef ACPI_DEBUGGER
-
ACPI_INIT_GLOBAL(u8, acpi_gbl_abort_method, FALSE);
ACPI_INIT_GLOBAL(acpi_thread_id, acpi_gbl_db_thread_id, ACPI_INVALID_THREAD_ID);
@@ -340,7 +339,6 @@ ACPI_GLOBAL(u32, acpi_gbl_db_console_debug_level);
ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_db_scope_node);
ACPI_GLOBAL(u8, acpi_gbl_db_terminate_loop);
ACPI_GLOBAL(u8, acpi_gbl_db_threads_terminated);
-
ACPI_GLOBAL(char *, acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]);
ACPI_GLOBAL(acpi_object_type, acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]);
@@ -350,32 +348,33 @@ ACPI_GLOBAL(char, acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE]);
ACPI_GLOBAL(char, acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE]);
ACPI_GLOBAL(char, acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE]);
-/*
- * Statistic globals
- */
+/* Statistics globals */
+
ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TOTAL_TYPES]);
ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TOTAL_TYPES]);
ACPI_GLOBAL(u16, acpi_gbl_obj_type_count_misc);
ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc);
ACPI_GLOBAL(u32, acpi_gbl_num_nodes);
ACPI_GLOBAL(u32, acpi_gbl_num_objects);
-
#endif /* ACPI_DEBUGGER */
#if defined (ACPI_DISASSEMBLER) || defined (ACPI_ASL_COMPILER)
-
ACPI_GLOBAL(const char, *acpi_gbl_pld_panel_list[]);
ACPI_GLOBAL(const char, *acpi_gbl_pld_vertical_position_list[]);
ACPI_GLOBAL(const char, *acpi_gbl_pld_horizontal_position_list[]);
ACPI_GLOBAL(const char, *acpi_gbl_pld_shape_list[]);
-
ACPI_INIT_GLOBAL(u8, acpi_gbl_disasm_flag, FALSE);
-
#endif
-/*
- * Meant for the -ca option.
- */
+/*****************************************************************************
+ *
+ * ACPICA application-specific globals
+ *
+ ****************************************************************************/
+
+/* ASL-to-ASL+ conversion utility (implemented within the iASL compiler) */
+
+#ifdef ACPI_ASL_COMPILER
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_inline_comment, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_end_node_comment, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_open_brace_comment, NULL);
@@ -386,23 +385,18 @@ ACPI_INIT_GLOBAL(char *, acpi_gbl_current_filename, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_parent_filename, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_include_filename, NULL);
-ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_last_list_head, NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_tail,
NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_tail,
NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_tail,
NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_tail,
@@ -410,30 +404,18 @@ ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_tail,
ACPI_INIT_GLOBAL(struct acpi_comment_addr_node,
*acpi_gbl_comment_addr_list_head, NULL);
-
-ACPI_INIT_GLOBAL(union acpi_parse_object, *acpi_gbl_current_scope, NULL);
-
ACPI_INIT_GLOBAL(struct acpi_file_node, *acpi_gbl_file_tree_root, NULL);
ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_reg_comment_cache);
ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_comment_addr_cache);
ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_file_cache);
-ACPI_INIT_GLOBAL(u8, gbl_capture_comments, FALSE);
-
ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_asl_conversion, FALSE);
ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_conv_debug_file, NULL);
-
ACPI_GLOBAL(char, acpi_gbl_table_sig[4]);
-
-/*****************************************************************************
- *
- * Application globals
- *
- ****************************************************************************/
+#endif
#ifdef ACPI_APPLICATION
-
ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_debug_file, NULL);
ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_output_file, NULL);
ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_timeout, FALSE);
@@ -442,16 +424,6 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_timeout, FALSE);
ACPI_GLOBAL(acpi_spinlock, acpi_gbl_print_lock); /* For print buffer */
ACPI_GLOBAL(char, acpi_gbl_print_buffer[1024]);
-
#endif /* ACPI_APPLICATION */
-/*****************************************************************************
- *
- * Info/help support
- *
- ****************************************************************************/
-
-extern const struct ah_predefined_name asl_predefined_info[];
-extern const struct ah_device_id asl_device_ids[];
-
#endif /* __ACGLOBAL_H__ */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 0d45b8b..a56675f 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -622,7 +622,7 @@ struct acpi_control_state {
union acpi_parse_object *predicate_op;
u8 *aml_predicate_start; /* Start of if/while predicate */
u8 *package_end; /* End of if/while block */
- u32 loop_count; /* While() loop counter */
+ u64 loop_timeout; /* While() loop timeout */
};
/*
@@ -1218,16 +1218,17 @@ struct acpi_db_method_info {
acpi_object_type *types;
/*
- * Arguments to be passed to method for the command
- * Threads -
- * the Number of threads, ID of current thread and
- * Index of current thread inside all them created.
+ * Arguments to be passed to method for the commands Threads and
+	 * Background. Note: ACPI specifies a maximum of 7 arguments (0-6).
+	 *
+	 * For the Threads command, the Number of threads, ID of the current
+	 * thread, and Index of the current thread among all those created.
*/
char init_args;
#ifdef ACPI_DEBUGGER
- acpi_object_type arg_types[4];
+ acpi_object_type arg_types[ACPI_METHOD_NUM_ARGS];
#endif
- char *arguments[4];
+ char *arguments[ACPI_METHOD_NUM_ARGS];
char num_threads_str[11];
char id_of_thread_str[11];
char index_of_thread_str[11];
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index c7f0c96..128a3d7 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -455,7 +455,7 @@
* the plist contains a set of parens to allow variable-length lists.
* These macros are used for both the debug and non-debug versions of the code.
*/
-#define ACPI_ERROR_NAMESPACE(s, e) acpi_ut_namespace_error (AE_INFO, s, e);
+#define ACPI_ERROR_NAMESPACE(s, p, e) acpi_ut_prefixed_namespace_error (AE_INFO, s, p, e);
#define ACPI_ERROR_METHOD(s, n, p, e) acpi_ut_method_error (AE_INFO, s, n, p, e);
#define ACPI_WARN_PREDEFINED(plist) acpi_ut_predefined_warning plist
#define ACPI_INFO_PREDEFINED(plist) acpi_ut_predefined_info plist
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 54a0c51..2fb1bb7 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -289,6 +289,9 @@ acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
u8 no_trailing);
+char *acpi_ns_build_prefixed_pathname(union acpi_generic_state *prefix_scope,
+ const char *internal_path);
+
char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state);
acpi_status
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 83b75e9..b6b29d7 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -118,9 +118,6 @@ extern const char *acpi_gbl_ptyp_decode[];
#ifndef ACPI_MSG_ERROR
#define ACPI_MSG_ERROR "ACPI Error: "
#endif
-#ifndef ACPI_MSG_EXCEPTION
-#define ACPI_MSG_EXCEPTION "ACPI Exception: "
-#endif
#ifndef ACPI_MSG_WARNING
#define ACPI_MSG_WARNING "ACPI Warning: "
#endif
@@ -129,10 +126,10 @@ extern const char *acpi_gbl_ptyp_decode[];
#endif
#ifndef ACPI_MSG_BIOS_ERROR
-#define ACPI_MSG_BIOS_ERROR "ACPI BIOS Error (bug): "
+#define ACPI_MSG_BIOS_ERROR "Firmware Error (ACPI): "
#endif
#ifndef ACPI_MSG_BIOS_WARNING
-#define ACPI_MSG_BIOS_WARNING "ACPI BIOS Warning (bug): "
+#define ACPI_MSG_BIOS_WARNING "Firmware Warning (ACPI): "
#endif
/*
@@ -233,10 +230,10 @@ u64 acpi_ut_implicit_strtoul64(char *string);
*/
acpi_status acpi_ut_init_globals(void);
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-
const char *acpi_ut_get_mutex_name(u32 mutex_id);
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type);
#endif
@@ -641,9 +638,11 @@ void ut_convert_backslashes(char *pathname);
void acpi_ut_repair_name(char *name);
-#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION) || defined (ACPI_DEBUG_OUTPUT)
u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source);
+void acpi_ut_safe_strncpy(char *dest, char *source, acpi_size dest_size);
+
u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source);
u8
@@ -737,9 +736,11 @@ acpi_ut_predefined_bios_error(const char *module_name,
u8 node_flags, const char *format, ...);
void
-acpi_ut_namespace_error(const char *module_name,
- u32 line_number,
- const char *internal_name, acpi_status lookup_status);
+acpi_ut_prefixed_namespace_error(const char *module_name,
+ u32 line_number,
+ union acpi_generic_state *prefix_scope,
+ const char *internal_name,
+ acpi_status lookup_status);
void
acpi_ut_method_error(const char *module_name,
diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c
index 3b30319..ed088fc 100644
--- a/drivers/acpi/acpica/dbexec.c
+++ b/drivers/acpi/acpica/dbexec.c
@@ -67,6 +67,8 @@ static acpi_status
acpi_db_execution_walk(acpi_handle obj_handle,
u32 nesting_level, void *context, void **return_value);
+static void ACPI_SYSTEM_XFACE acpi_db_single_execution_thread(void *context);
+
/*******************************************************************************
*
* FUNCTION: acpi_db_delete_objects
@@ -229,7 +231,7 @@ static acpi_status acpi_db_execute_setup(struct acpi_db_method_info *info)
ACPI_FUNCTION_NAME(db_execute_setup);
- /* Catenate the current scope to the supplied name */
+ /* Concatenate the current scope to the supplied name */
info->pathname[0] = 0;
if ((info->name[0] != '\\') && (info->name[0] != '/')) {
@@ -611,6 +613,112 @@ static void ACPI_SYSTEM_XFACE acpi_db_method_thread(void *context)
/*******************************************************************************
*
+ * FUNCTION: acpi_db_single_execution_thread
+ *
+ * PARAMETERS: context - Method info struct
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Create one thread and execute a method
+ *
+ ******************************************************************************/
+
+static void ACPI_SYSTEM_XFACE acpi_db_single_execution_thread(void *context)
+{
+ struct acpi_db_method_info *info = context;
+ acpi_status status;
+ struct acpi_buffer return_obj;
+
+ acpi_os_printf("\n");
+
+ status = acpi_db_execute_method(info, &return_obj);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("%s During evaluation of %s\n",
+ acpi_format_exception(status), info->pathname);
+ return;
+ }
+
+ /* Display a return object, if any */
+
+ if (return_obj.length) {
+ acpi_os_printf("Evaluation of %s returned object %p, "
+ "external buffer length %X\n",
+ acpi_gbl_db_method_info.pathname,
+ return_obj.pointer, (u32)return_obj.length);
+
+ acpi_db_dump_external_object(return_obj.pointer, 1);
+ }
+
+ acpi_os_printf("\nBackground thread completed\n%c ",
+ ACPI_DEBUGGER_COMMAND_PROMPT);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_db_create_execution_thread
+ *
+ * PARAMETERS: method_name_arg - Control method to execute
+ * arguments - Array of arguments to the method
+ * types - Corresponding array of object types
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Create a single thread to evaluate a namespace object. Handles
+ * arguments passed on command line for control methods.
+ *
+ ******************************************************************************/
+
+void
+acpi_db_create_execution_thread(char *method_name_arg,
+ char **arguments, acpi_object_type *types)
+{
+ acpi_status status;
+ u32 i;
+
+ memset(&acpi_gbl_db_method_info, 0, sizeof(struct acpi_db_method_info));
+ acpi_gbl_db_method_info.name = method_name_arg;
+ acpi_gbl_db_method_info.init_args = 1;
+ acpi_gbl_db_method_info.args = acpi_gbl_db_method_info.arguments;
+ acpi_gbl_db_method_info.types = acpi_gbl_db_method_info.arg_types;
+
+	/* Set up method arguments, up to 7 (0-6) */
+
+ for (i = 0; (i < ACPI_METHOD_NUM_ARGS) && *arguments; i++) {
+ acpi_gbl_db_method_info.arguments[i] = *arguments;
+ arguments++;
+
+ acpi_gbl_db_method_info.arg_types[i] = *types;
+ types++;
+ }
+
+ status = acpi_db_execute_setup(&acpi_gbl_db_method_info);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+	/* Get the NS node; this also verifies that the object exists */
+
+ status = acpi_get_handle(NULL, acpi_gbl_db_method_info.pathname,
+ &acpi_gbl_db_method_info.method);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("%s Could not get handle for %s\n",
+ acpi_format_exception(status),
+ acpi_gbl_db_method_info.pathname);
+ return;
+ }
+
+ status = acpi_os_execute(OSL_DEBUGGER_EXEC_THREAD,
+ acpi_db_single_execution_thread,
+ &acpi_gbl_db_method_info);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+ acpi_os_printf("\nBackground thread started\n");
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_db_create_execution_threads
*
* PARAMETERS: num_threads_arg - Number of threads to create
diff --git a/drivers/acpi/acpica/dbfileio.c b/drivers/acpi/acpica/dbfileio.c
index 4d81ea2..cf96079 100644
--- a/drivers/acpi/acpica/dbfileio.c
+++ b/drivers/acpi/acpica/dbfileio.c
@@ -99,8 +99,8 @@ void acpi_db_open_debug_file(char *name)
}
acpi_os_printf("Debug output file %s opened\n", name);
- strncpy(acpi_gbl_db_debug_filename, name,
- sizeof(acpi_gbl_db_debug_filename));
+ acpi_ut_safe_strncpy(acpi_gbl_db_debug_filename, name,
+ sizeof(acpi_gbl_db_debug_filename));
acpi_gbl_db_output_to_file = TRUE;
}
#endif
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index 2626d79..954ca3b 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -136,6 +136,7 @@ enum acpi_ex_debugger_commands {
CMD_UNLOAD,
CMD_TERMINATE,
+ CMD_BACKGROUND,
CMD_THREADS,
CMD_TEST,
@@ -212,6 +213,7 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
{"UNLOAD", 1},
{"TERMINATE", 0},
+ {"BACKGROUND", 1},
{"THREADS", 3},
{"TEST", 1},
@@ -222,9 +224,56 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
/*
* Help for all debugger commands. First argument is the number of lines
* of help to output for the command.
+ *
+ * Note: Some commands are not supported by the kernel-level version of
+ * the debugger.
*/
static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
- {0, "\nGeneral-Purpose Commands:", "\n"},
+ {0, "\nNamespace Access:", "\n"},
+ {1, " Businfo", "Display system bus info\n"},
+ {1, " Disassemble <Method>", "Disassemble a control method\n"},
+ {1, " Find <AcpiName> (? is wildcard)",
+ "Find ACPI name(s) with wildcards\n"},
+ {1, " Integrity", "Validate namespace integrity\n"},
+ {1, " Methods", "Display list of loaded control methods\n"},
+ {1, " Namespace [Object] [Depth]",
+ "Display loaded namespace tree/subtree\n"},
+ {1, " Notify <Object> <Value>", "Send a notification on Object\n"},
+ {1, " Objects [ObjectType]",
+ "Display summary of all objects or just given type\n"},
+ {1, " Owner <OwnerId> [Depth]",
+ "Display loaded namespace by object owner\n"},
+ {1, " Paths", "Display full pathnames of namespace objects\n"},
+ {1, " Predefined", "Check all predefined names\n"},
+ {1, " Prefix [<Namepath>]", "Set or Get current execution prefix\n"},
+ {1, " References <Addr>", "Find all references to object at addr\n"},
+ {1, " Resources [DeviceName]",
+ "Display Device resources (no arg = all devices)\n"},
+ {1, " Set N <NamedObject> <Value>", "Set value for named integer\n"},
+ {1, " Template <Object>", "Format/dump a Buffer/ResourceTemplate\n"},
+ {1, " Type <Object>", "Display object type\n"},
+
+ {0, "\nControl Method Execution:", "\n"},
+ {1, " Evaluate <Namepath> [Arguments]",
+ "Evaluate object or control method\n"},
+ {1, " Execute <Namepath> [Arguments]", "Synonym for Evaluate\n"},
+#ifdef ACPI_APPLICATION
+ {1, " Background <Namepath> [Arguments]",
+ "Evaluate object/method in a separate thread\n"},
+ {1, " Thread <Threads><Loops><NamePath>",
+ "Spawn threads to execute method(s)\n"},
+#endif
+ {1, " Debug <Namepath> [Arguments]", "Single-Step a control method\n"},
+ {7, " [Arguments] formats:", "Control method argument formats\n"},
+ {1, " Hex Integer", "Integer\n"},
+ {1, " \"Ascii String\"", "String\n"},
+ {1, " (Hex Byte List)", "Buffer\n"},
+ {1, " (01 42 7A BF)", "Buffer example (4 bytes)\n"},
+ {1, " [Package Element List]", "Package\n"},
+ {1, " [0x01 0x1234 \"string\"]",
+ "Package example (3 elements)\n"},
+
+ {0, "\nMiscellaneous:", "\n"},
{1, " Allocations", "Display list of current memory allocations\n"},
{2, " Dump <Address>|<Namepath>", "\n"},
{0, " [Byte|Word|Dword|Qword]",
@@ -248,46 +297,30 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
{1, " Stack", "Display CPU stack usage\n"},
{1, " Tables", "Info about current ACPI table(s)\n"},
{1, " Tables", "Display info about loaded ACPI tables\n"},
+#ifdef ACPI_APPLICATION
+ {1, " Terminate", "Delete namespace and all internal objects\n"},
+#endif
{1, " ! <CommandNumber>", "Execute command from history buffer\n"},
{1, " !!", "Execute last command again\n"},
- {0, "\nNamespace Access Commands:", "\n"},
- {1, " Businfo", "Display system bus info\n"},
- {1, " Disassemble <Method>", "Disassemble a control method\n"},
- {1, " Find <AcpiName> (? is wildcard)",
- "Find ACPI name(s) with wildcards\n"},
- {1, " Integrity", "Validate namespace integrity\n"},
- {1, " Methods", "Display list of loaded control methods\n"},
- {1, " Namespace [Object] [Depth]",
- "Display loaded namespace tree/subtree\n"},
- {1, " Notify <Object> <Value>", "Send a notification on Object\n"},
- {1, " Objects [ObjectType]",
- "Display summary of all objects or just given type\n"},
- {1, " Owner <OwnerId> [Depth]",
- "Display loaded namespace by object owner\n"},
- {1, " Paths", "Display full pathnames of namespace objects\n"},
- {1, " Predefined", "Check all predefined names\n"},
- {1, " Prefix [<Namepath>]", "Set or Get current execution prefix\n"},
- {1, " References <Addr>", "Find all references to object at addr\n"},
- {1, " Resources [DeviceName]",
- "Display Device resources (no arg = all devices)\n"},
- {1, " Set N <NamedObject> <Value>", "Set value for named integer\n"},
- {1, " Template <Object>", "Format/dump a Buffer/ResourceTemplate\n"},
- {1, " Type <Object>", "Display object type\n"},
+ {0, "\nMethod and Namespace Debugging:", "\n"},
+ {5, " Trace <State> [<Namepath>] [Once]",
+ "Trace control method execution\n"},
+ {1, " Enable", "Enable all messages\n"},
+ {1, " Disable", "Disable tracing\n"},
+ {1, " Method", "Enable method execution messages\n"},
+ {1, " Opcode", "Enable opcode execution messages\n"},
+ {3, " Test <TestName>", "Invoke a debug test\n"},
+ {1, " Objects", "Read/write/compare all namespace data objects\n"},
+ {1, " Predefined",
+ "Validate all ACPI predefined names (_STA, etc.)\n"},
+ {1, " Execute predefined",
+ "Execute all predefined (public) methods\n"},
- {0, "\nControl Method Execution Commands:", "\n"},
+ {0, "\nControl Method Single-Step Execution:", "\n"},
{1, " Arguments (or Args)", "Display method arguments\n"},
{1, " Breakpoint <AmlOffset>", "Set an AML execution breakpoint\n"},
{1, " Call", "Run to next control method invocation\n"},
- {1, " Debug <Namepath> [Arguments]", "Single Step a control method\n"},
- {6, " Evaluate", "Synonym for Execute\n"},
- {5, " Execute <Namepath> [Arguments]", "Execute control method\n"},
- {1, " Hex Integer", "Integer method argument\n"},
- {1, " \"Ascii String\"", "String method argument\n"},
- {1, " (Hex Byte List)", "Buffer method argument\n"},
- {1, " [Package Element List]", "Package method argument\n"},
- {5, " Execute predefined",
- "Execute all predefined (public) methods\n"},
{1, " Go", "Allow method to run to completion\n"},
{1, " Information", "Display info about the current method\n"},
{1, " Into", "Step into (not over) a method call\n"},
@@ -296,41 +329,24 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
{1, " Results", "Display method result stack\n"},
{1, " Set <A|L> <#> <Value>", "Set method data (Arguments/Locals)\n"},
{1, " Stop", "Terminate control method\n"},
- {5, " Trace <State> [<Namepath>] [Once]",
- "Trace control method execution\n"},
- {1, " Enable", "Enable all messages\n"},
- {1, " Disable", "Disable tracing\n"},
- {1, " Method", "Enable method execution messages\n"},
- {1, " Opcode", "Enable opcode execution messages\n"},
{1, " Tree", "Display control method calling tree\n"},
{1, " <Enter>", "Single step next AML opcode (over calls)\n"},
#ifdef ACPI_APPLICATION
- {0, "\nHardware Simulation Commands:", "\n"},
- {1, " EnableAcpi", "Enable ACPI (hardware) mode\n"},
- {1, " Event <F|G> <Value>", "Generate AcpiEvent (Fixed/GPE)\n"},
- {1, " Gpe <GpeNum> [GpeBlockDevice]", "Simulate a GPE\n"},
- {1, " Gpes", "Display info on all GPE devices\n"},
- {1, " Sci", "Generate an SCI\n"},
- {1, " Sleep [SleepState]", "Simulate sleep/wake sequence(s) (0-5)\n"},
-
- {0, "\nFile I/O Commands:", "\n"},
+ {0, "\nFile Operations:", "\n"},
{1, " Close", "Close debug output file\n"},
{1, " Load <Input Filename>", "Load ACPI table from a file\n"},
{1, " Open <Output Filename>", "Open a file for debug output\n"},
{1, " Unload <Namepath>",
"Unload an ACPI table via namespace object\n"},
- {0, "\nUser Space Commands:", "\n"},
- {1, " Terminate", "Delete namespace and all internal objects\n"},
- {1, " Thread <Threads><Loops><NamePath>",
- "Spawn threads to execute method(s)\n"},
-
- {0, "\nDebug Test Commands:", "\n"},
- {3, " Test <TestName>", "Invoke a debug test\n"},
- {1, " Objects", "Read/write/compare all namespace data objects\n"},
- {1, " Predefined",
- "Execute all ACPI predefined names (_STA, etc.)\n"},
+ {0, "\nHardware Simulation:", "\n"},
+ {1, " EnableAcpi", "Enable ACPI (hardware) mode\n"},
+ {1, " Event <F|G> <Value>", "Generate AcpiEvent (Fixed/GPE)\n"},
+ {1, " Gpe <GpeNum> [GpeBlockDevice]", "Simulate a GPE\n"},
+ {1, " Gpes", "Display info on all GPE devices\n"},
+ {1, " Sci", "Generate an SCI\n"},
+ {1, " Sleep [SleepState]", "Simulate sleep/wake sequence(s) (0-5)\n"},
#endif
{0, NULL, NULL}
};
@@ -442,11 +458,15 @@ static void acpi_db_display_help(char *command)
/* No argument to help, display help for all commands */
+ acpi_os_printf("\nSummary of AML Debugger Commands\n\n");
+
while (next->invocation) {
acpi_os_printf("%-38s%s", next->invocation,
next->description);
next++;
}
+ acpi_os_printf("\n");
+
} else {
		/* Display help for all commands that match the substring */
@@ -1087,6 +1107,13 @@ acpi_db_command_dispatch(char *input_buffer,
/* acpi_initialize (NULL); */
break;
+ case CMD_BACKGROUND:
+
+ acpi_db_create_execution_thread(acpi_gbl_db_args[1],
+ &acpi_gbl_db_args[2],
+ &acpi_gbl_db_arg_types[2]);
+ break;
+
case CMD_THREADS:
acpi_db_create_execution_threads(acpi_gbl_db_args[1],
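For a sense of how the new Background command behaves, here is a minimal
sketch of an interactive session. The method path, the argument, and the
'-' prompt character are illustrative assumptions; the status lines follow
the printf calls added in dbexec.c above:

    - Background \_SB.PCI0.EC0.MTH1 0x1234
    Background thread started
    - Methods                     (prompt returns at once; other
    ...                            commands remain usable meanwhile)
    Background thread completed
    -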
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index f470e81..4b6ebc2 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -118,6 +118,8 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
control_state->control.package_end =
walk_state->parser_state.pkg_end;
control_state->control.opcode = op->common.aml_opcode;
+ control_state->control.loop_timeout = acpi_os_get_timer() +
+ (u64)(acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC);
/* Push the control state on this walk's control stack */
@@ -206,15 +208,15 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
/* Predicate was true, the body of the loop was just executed */
/*
- * This loop counter mechanism allows the interpreter to escape
- * possibly infinite loops. This can occur in poorly written AML
- * when the hardware does not respond within a while loop and the
- * loop does not implement a timeout.
+ * This infinite loop detection mechanism allows the interpreter
+ * to escape possibly infinite loops. This can occur in poorly
+ * written AML when the hardware does not respond within a while
+ * loop and the loop does not implement a timeout.
*/
- control_state->control.loop_count++;
- if (control_state->control.loop_count >
- acpi_gbl_max_loop_iterations) {
- status = AE_AML_INFINITE_LOOP;
+ if (ACPI_TIME_AFTER(acpi_os_get_timer(),
+ control_state->control.
+ loop_timeout)) {
+ status = AE_AML_LOOP_TIMEOUT;
break;
}
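Extracted from the hunk above, a minimal sketch of the deadline pattern in
isolation. Control flow is simplified (predicate_is_true() and
execute_loop_body() are placeholders); acpi_os_get_timer() returns a
monotonic count in 100 ns units, so acpi_gbl_max_loop_iterations is now
interpreted as a timeout in seconds:

    /* Compute the deadline once, when the While() is first entered */
    u64 loop_timeout = acpi_os_get_timer() +
        (u64)(acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC);

    while (predicate_is_true()) {
        execute_loop_body();

        /* A wall-clock bound works even when individual iterations
         * vary wildly in duration, unlike the old fixed loop count */
        if (ACPI_TIME_AFTER(acpi_os_get_timer(), loop_timeout)) {
            return (AE_AML_LOOP_TIMEOUT);
        }
    }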
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 7bcf5f5e..0cab34a 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -209,7 +209,8 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
ACPI_IMODE_LOAD_PASS1, flags,
walk_state, &node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
}
@@ -383,7 +384,9 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
walk_state,
&info->connection_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(child->common.
+ ACPI_ERROR_NAMESPACE(walk_state->
+ scope_info,
+ child->common.
value.name,
status);
return_ACPI_STATUS(status);
@@ -402,7 +405,8 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
ACPI_NS_DONT_OPEN_SCOPE,
walk_state, &info->field_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ (char *)&arg->named.name,
status);
return_ACPI_STATUS(status);
} else {
@@ -498,7 +502,8 @@ acpi_ds_create_field(union acpi_parse_object *op,
&region_node);
#endif
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.name, status);
return_ACPI_STATUS(status);
}
}
@@ -618,7 +623,8 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
ACPI_IMODE_LOAD_PASS1, flags,
walk_state, &node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ (char *)&arg->named.name,
status);
if (status != AE_ALREADY_EXISTS) {
return_ACPI_STATUS(status);
@@ -681,7 +687,8 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
&region_node);
#endif
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.name, status);
return_ACPI_STATUS(status);
}
}
@@ -695,7 +702,8 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.register_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
@@ -765,7 +773,8 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.register_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
@@ -778,7 +787,8 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.data_register_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 8244855..b21fe08 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -112,7 +112,9 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
acpi_namespace_node,
&(op->common.node)));
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(op->common.value.
+ ACPI_ERROR_NAMESPACE(walk_state->
+ scope_info,
+ op->common.value.
string, status);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index 6d487ed..5a602b7 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -297,8 +297,10 @@ acpi_ds_init_package_element(u8 object_type,
{
union acpi_operand_object **element_ptr;
+ ACPI_FUNCTION_TRACE(ds_init_package_element);
+
if (!source_object) {
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/*
@@ -329,7 +331,7 @@ acpi_ds_init_package_element(u8 object_type,
source_object->package.flags |= AOPOBJ_DATA_VALID;
}
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
@@ -352,6 +354,7 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
union acpi_generic_state scope_info;
union acpi_operand_object *element = *element_ptr;
struct acpi_namespace_node *resolved_node;
+ struct acpi_namespace_node *original_node;
char *external_path = NULL;
acpi_object_type type;
@@ -441,6 +444,7 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
* will remain as named references. This behavior is not described
* in the ACPI spec, but it appears to be an oversight.
*/
+ original_node = resolved_node;
status = acpi_ex_resolve_node_to_value(&resolved_node, NULL);
if (ACPI_FAILURE(status)) {
return_VOID;
@@ -468,26 +472,27 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
*/
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_THERMAL:
-
- /* TBD: This may not be necesssary */
-
- acpi_ut_add_reference(resolved_node->object);
+ case ACPI_TYPE_METHOD:
break;
case ACPI_TYPE_MUTEX:
- case ACPI_TYPE_METHOD:
case ACPI_TYPE_POWER:
case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_EVENT:
case ACPI_TYPE_REGION:
+ /* acpi_ex_resolve_node_to_value gave these an extra reference */
+
+ acpi_ut_remove_reference(original_node->object);
break;
default:
/*
* For all other types - the node was resolved to an actual
- * operand object with a value, return the object
+		 * operand object with a value; return the object. Remove
+ * a reference on the existing object.
*/
+ acpi_ut_remove_reference(element);
*element_ptr = (union acpi_operand_object *)resolved_node;
break;
}
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 0dabd9b..4c5faf6 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -583,7 +583,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
}
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(name_string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ name_string, status);
}
}
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index eaa859a..5771e4e 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -207,7 +207,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
}
#endif
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(path, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info, path,
+ status);
return_ACPI_STATUS(status);
}
@@ -375,7 +376,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
}
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(path, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ path, status);
return_ACPI_STATUS(status);
}
}
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index aad83ef..b3d0aae 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -184,11 +184,14 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
if (status == AE_NOT_FOUND) {
status = AE_OK;
} else {
- ACPI_ERROR_NAMESPACE(buffer_ptr,
+ ACPI_ERROR_NAMESPACE(walk_state->
+ scope_info,
+ buffer_ptr,
status);
}
#else
- ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ buffer_ptr, status);
#endif
return_ACPI_STATUS(status);
}
@@ -343,7 +346,8 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
}
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ buffer_ptr, status);
return_ACPI_STATUS(status);
}
@@ -719,7 +723,8 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
*/
op->common.node = new_node;
} else {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
}
break;
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 28b447f..bb58419 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -298,6 +298,16 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
acpi_ut_get_region_name(region_obj->region.
space_id)));
+
+ /*
+ * Special case for an EC timeout. These are seen so frequently
+ * that an additional error message is helpful
+ */
+ if ((region_obj->region.space_id == ACPI_ADR_SPACE_EC) &&
+ (status == AE_TIME)) {
+ ACPI_ERROR((AE_INFO,
+ "Timeout from EC hardware or EC device driver"));
+ }
}
if (!(handler_desc->address_space.handler_flags &
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 83398dc..b2ff61b 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -617,10 +617,11 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
u32 length;
u32 index;
- ACPI_FUNCTION_NAME(ex_dump_operand)
+ ACPI_FUNCTION_NAME(ex_dump_operand);
- /* Check if debug output enabled */
- if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_EXEC, _COMPONENT)) {
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_EXEC, _COMPONENT)) {
return;
}
@@ -904,7 +905,7 @@ void
acpi_ex_dump_operands(union acpi_operand_object **operands,
const char *opcode_name, u32 num_operands)
{
- ACPI_FUNCTION_NAME(ex_dump_operands);
+ ACPI_FUNCTION_TRACE(ex_dump_operands);
if (!opcode_name) {
opcode_name = "UNKNOWN";
@@ -928,7 +929,7 @@ acpi_ex_dump_operands(union acpi_operand_object **operands,
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"**** End operand dump for [%s]\n", opcode_name));
- return;
+ return_VOID;
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index a2f4e25..5b42829 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -150,10 +150,10 @@ ACPI_EXPORT_SYMBOL(acpi_get_timer)
*
******************************************************************************/
acpi_status
-acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
+acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 *time_elapsed)
{
acpi_status status;
- u32 delta_ticks;
+ u64 delta_ticks;
u64 quotient;
ACPI_FUNCTION_TRACE(acpi_get_timer_duration);
@@ -168,30 +168,29 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
return_ACPI_STATUS(AE_SUPPORT);
}
+ if (start_ticks == end_ticks) {
+ *time_elapsed = 0;
+ return_ACPI_STATUS(AE_OK);
+ }
+
/*
* Compute Tick Delta:
* Handle (max one) timer rollovers on 24-bit versus 32-bit timers.
*/
- if (start_ticks < end_ticks) {
- delta_ticks = end_ticks - start_ticks;
- } else if (start_ticks > end_ticks) {
+ delta_ticks = end_ticks;
+ if (start_ticks > end_ticks) {
if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
/* 24-bit Timer */
- delta_ticks =
- (((0x00FFFFFF - start_ticks) +
- end_ticks) & 0x00FFFFFF);
+ delta_ticks |= (u64)1 << 24;
} else {
/* 32-bit Timer */
- delta_ticks = (0xFFFFFFFF - start_ticks) + end_ticks;
+ delta_ticks |= (u64)1 << 32;
}
- } else { /* start_ticks == end_ticks */
-
- *time_elapsed = 0;
- return_ACPI_STATUS(AE_OK);
}
+ delta_ticks -= start_ticks;
/*
* Compute Duration (Requires a 64-bit multiply and divide):
@@ -199,10 +198,10 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
* time_elapsed (microseconds) =
* (delta_ticks * ACPI_USEC_PER_SEC) / ACPI_PM_TIMER_FREQUENCY;
*/
- status = acpi_ut_short_divide(((u64)delta_ticks) * ACPI_USEC_PER_SEC,
+ status = acpi_ut_short_divide(delta_ticks * ACPI_USEC_PER_SEC,
ACPI_PM_TIMER_FREQUENCY, &quotient, NULL);
- *time_elapsed = (u32) quotient;
+ *time_elapsed = (u32)quotient;
return_ACPI_STATUS(status);
}
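A worked example of the rollover borrow above, assuming a 24-bit PM timer
at ACPI_PM_TIMER_FREQUENCY (3579545 Hz) and exactly one rollover between
the two samples:

    u32 start_ticks = 0x00FFFF00;   /* sampled near the top of the range */
    u32 end_ticks   = 0x00000100;   /* sampled just after the rollover   */

    u64 delta_ticks = end_ticks;    /* 0x00000100                        */
    delta_ticks |= (u64)1 << 24;    /* borrow the rollover: 0x01000100   */
    delta_ticks -= start_ticks;     /* 0x00000200 = 512 ticks            */

    /* 512 * ACPI_USEC_PER_SEC / 3579545 is roughly 143 microseconds */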
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 3094cec..d167903 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -128,14 +128,14 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
acpi_io_address last_address;
const struct acpi_port_info *port_info;
- ACPI_FUNCTION_NAME(hw_validate_io_request);
+ ACPI_FUNCTION_TRACE(hw_validate_io_request);
/* Supported widths are 8/16/32 */
if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) {
ACPI_ERROR((AE_INFO,
"Bad BitWidth parameter: %8.8X", bit_width));
- return (AE_BAD_PARAMETER);
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
}
port_info = acpi_protected_ports;
@@ -153,13 +153,13 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
ACPI_ERROR((AE_INFO,
"Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X",
ACPI_FORMAT_UINT64(address), byte_width));
- return (AE_LIMIT);
+ return_ACPI_STATUS(AE_LIMIT);
}
/* Exit if requested address is not within the protected port table */
if (address > acpi_protected_ports[ACPI_PORT_INFO_ENTRIES - 1].end) {
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/* Check request against the list of protected I/O ports */
@@ -180,8 +180,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
/* Port illegality may depend on the _OSI calls made by the BIOS */
if (acpi_gbl_osi_data >= port_info->osi_dependency) {
- ACPI_DEBUG_PRINT((ACPI_DB_IO,
- "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)",
+ ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
+ "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)\n",
ACPI_FORMAT_UINT64(address),
byte_width, port_info->name,
port_info->start,
@@ -198,7 +198,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
}
}
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/******************************************************************************
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index f2733f5..33e652a 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -644,17 +644,18 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
this_node->object;
}
}
-#ifdef ACPI_ASL_COMPILER
- if (!acpi_gbl_disasm_flag &&
- (this_node->flags & ANOBJ_IS_EXTERNAL)) {
- this_node->flags |= IMPLICIT_EXTERNAL;
- }
-#endif
}
/* Special handling for the last segment (num_segments == 0) */
else {
+#ifdef ACPI_ASL_COMPILER
+ if (!acpi_gbl_disasm_flag
+ && (this_node->flags & ANOBJ_IS_EXTERNAL)) {
+ this_node->flags &= ~IMPLICIT_EXTERNAL;
+ }
+#endif
+
/*
* Sanity typecheck of the target object:
*
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index 539d775..d55dcc8 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -495,7 +495,8 @@ acpi_ns_convert_to_reference(struct acpi_namespace_node *scope,
/* Check if we are resolving a named reference within a package */
- ACPI_ERROR_NAMESPACE(original_object->string.pointer, status);
+ ACPI_ERROR_NAMESPACE(&scope_info,
+ original_object->string.pointer, status);
goto error_exit;
}
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index a410760..22c92d1 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -49,6 +49,9 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsnames")
+/* Local Prototypes */
+static void acpi_ns_normalize_pathname(char *original_path);
+
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_external_pathname
@@ -63,6 +66,7 @@ ACPI_MODULE_NAME("nsnames")
* for error and debug statements.
*
******************************************************************************/
+
char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
{
char *name_buffer;
@@ -352,3 +356,148 @@ char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
return_PTR(name_buffer);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_build_prefixed_pathname
+ *
+ * PARAMETERS: prefix_scope - Scope/Path that prefixes the internal path
+ * internal_path - Name or path of the namespace node
+ *
+ * RETURN:      Full pathname string; the caller must free it with ACPI_FREE
+ *
+ * DESCRIPTION: Construct a fully qualified pathname from a concatenation of:
+ * 1) Path associated with the prefix_scope namespace node
+ * 2) External path representation of the Internal path
+ *
+ ******************************************************************************/
+
+char *acpi_ns_build_prefixed_pathname(union acpi_generic_state *prefix_scope,
+ const char *internal_path)
+{
+ acpi_status status;
+ char *full_path = NULL;
+ char *external_path = NULL;
+ char *prefix_path = NULL;
+ u32 prefix_path_length = 0;
+
+ /* If there is a prefix, get the pathname to it */
+
+ if (prefix_scope && prefix_scope->scope.node) {
+ prefix_path =
+ acpi_ns_get_normalized_pathname(prefix_scope->scope.node,
+ TRUE);
+ if (prefix_path) {
+ prefix_path_length = strlen(prefix_path);
+ }
+ }
+
+ status = acpi_ns_externalize_name(ACPI_UINT32_MAX, internal_path,
+ NULL, &external_path);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /* Merge the prefix path and the path. 2 is for one dot and trailing null */
+
+ full_path =
+ ACPI_ALLOCATE_ZEROED(prefix_path_length + strlen(external_path) +
+ 2);
+ if (!full_path) {
+ goto cleanup;
+ }
+
+ /* Don't merge if the External path is already fully qualified */
+
+ if (prefix_path && (*external_path != '\\') && (*external_path != '^')) {
+ strcat(full_path, prefix_path);
+ if (prefix_path[1]) {
+ strcat(full_path, ".");
+ }
+ }
+
+ acpi_ns_normalize_pathname(external_path);
+ strcat(full_path, external_path);
+
+cleanup:
+ if (prefix_path) {
+ ACPI_FREE(prefix_path);
+ }
+ if (external_path) {
+ ACPI_FREE(external_path);
+ }
+
+ return (full_path);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_normalize_pathname
+ *
+ * PARAMETERS: original_path - Path to be normalized, in External format
+ *
+ * RETURN:      None. The original path is processed in-place
+ *
+ * DESCRIPTION: Remove trailing underscores from each element of a path.
+ *
+ * For example: \A___.B___.C___ becomes \A.B.C
+ *
+ ******************************************************************************/
+
+static void acpi_ns_normalize_pathname(char *original_path)
+{
+ char *input_path = original_path;
+ char *new_path_buffer;
+ char *new_path;
+ u32 i;
+
+ /* Allocate a temp buffer in which to construct the new path */
+
+ new_path_buffer = ACPI_ALLOCATE_ZEROED(strlen(input_path) + 1);
+ new_path = new_path_buffer;
+ if (!new_path_buffer) {
+ return;
+ }
+
+ /* Special characters may appear at the beginning of the path */
+
+ if (*input_path == '\\') {
+ *new_path = *input_path;
+ new_path++;
+ input_path++;
+ }
+
+ while (*input_path == '^') {
+ *new_path = *input_path;
+ new_path++;
+ input_path++;
+ }
+
+ /* Remainder of the path */
+
+ while (*input_path) {
+
+ /* Do one nameseg at a time */
+
+ for (i = 0; (i < ACPI_NAME_SIZE) && *input_path; i++) {
+ if ((i == 0) || (*input_path != '_')) { /* First char is allowed to be underscore */
+ *new_path = *input_path;
+ new_path++;
+ }
+
+ input_path++;
+ }
+
+ /* Dot means that there are more namesegs to come */
+
+ if (*input_path == '.') {
+ *new_path = *input_path;
+ new_path++;
+ input_path++;
+ }
+ }
+
+ *new_path = 0;
+ strcpy(original_path, new_path_buffer);
+ ACPI_FREE(new_path_buffer);
+}
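To make the intended results concrete, a few illustrative input/output
pairs for the two helpers above (all names are hypothetical):

    /* acpi_ns_normalize_pathname(): trailing underscores are trimmed from
     * each nameseg; a leading underscore (as in _L0D) is preserved */
    \A___.B___.C___    ->  \A.B.C
    ^^GPE_._L0D        ->  ^^GPE._L0D

    /* acpi_ns_build_prefixed_pathname(): prefix scope plus externalized
     * internal path; no merge when the path is already fully qualified */
    prefix \_SB.PCI0 + internal "LNKA"    ->  \_SB.PCI0.LNKA
    prefix \_SB.PCI0 + internal "\_OSI"   ->  \_OSI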
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 5de8957..e91dbee 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -417,6 +417,7 @@ acpi_ns_search_and_enter(u32 target_name,
if (flags & ACPI_NS_EXTERNAL ||
(walk_state && walk_state->opcode == AML_SCOPE_OP)) {
new_node->flags |= ANOBJ_IS_EXTERNAL;
+ new_node->flags |= IMPLICIT_EXTERNAL;
}
#endif
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 783f4c8..9b51f65 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -61,10 +61,10 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info);
*
* PARAMETERS: handle - Object handle (optional)
* pathname - Object pathname (optional)
- * external_params - List of parameters to pass to method,
+ * external_params - List of parameters to pass to a method,
* terminated by NULL. May be NULL
* if no parameters are being passed.
- * return_buffer - Where to put method's return value (if
+ * return_buffer - Where to put the object's return value (if
* any). If NULL, no value is returned.
* return_type - Expected type of return object
*
@@ -100,13 +100,14 @@ acpi_evaluate_object_typed(acpi_handle handle,
free_buffer_on_error = TRUE;
}
+ /* Get a handle here, in order to build an error message if needed */
+
+ target_handle = handle;
if (pathname) {
status = acpi_get_handle(handle, pathname, &target_handle);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- } else {
- target_handle = handle;
}
full_pathname = acpi_ns_get_external_pathname(target_handle);
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index eb9dfac..171e2fa 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -361,7 +361,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
/* Final exception check (may have been changed from code above) */
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(path, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info, path, status);
if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
ACPI_PARSE_EXECUTE) {
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 0bef6df..c0b1798 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -372,16 +372,10 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
* external declaration opcode. Setting walk_state->Aml to
	 * walk_state->parser_state.Aml + 2 increments the
* walk_state->Aml past the object type and the paramcount of the
- * external opcode. For the error message, only print the AML
- * offset. We could attempt to print the name but this may cause
- * a segmentation fault when printing the namepath because the
- * AML may be incorrect.
+ * external opcode.
*/
- acpi_os_printf
- ("// Invalid external declaration at AML offset 0x%x.\n",
- walk_state->aml -
- walk_state->parser_state.aml_start);
walk_state->aml = walk_state->parser_state.aml + 2;
+ walk_state->parser_state.aml = walk_state->aml;
return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
}
#endif
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 0264276..cd59dfe 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -94,9 +94,11 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
op->common.descriptor_type = ACPI_DESC_TYPE_PARSER;
op->common.aml_opcode = opcode;
- ACPI_DISASM_ONLY_MEMBERS(strncpy(op->common.aml_op_name,
- (acpi_ps_get_opcode_info(opcode))->
- name, sizeof(op->common.aml_op_name)));
+ ACPI_DISASM_ONLY_MEMBERS(acpi_ut_safe_strncpy(op->common.aml_op_name,
+ (acpi_ps_get_opcode_info
+ (opcode))->name,
+ sizeof(op->common.
+ aml_op_name)));
}
/*******************************************************************************
@@ -158,10 +160,10 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml)
if (opcode == AML_SCOPE_OP) {
acpi_gbl_current_scope = op;
}
- }
- if (gbl_capture_comments) {
- ASL_CV_TRANSFER_COMMENTS(op);
+ if (acpi_gbl_capture_comments) {
+ ASL_CV_TRANSFER_COMMENTS(op);
+ }
}
return (op);
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 615a885..cff7154 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -163,6 +163,9 @@ acpi_debug_print(u32 requested_debug_level,
{
acpi_thread_id thread_id;
va_list args;
+#ifdef ACPI_APPLICATION
+ int fill_count;
+#endif
/* Check if debug output enabled */
@@ -202,10 +205,21 @@ acpi_debug_print(u32 requested_debug_level,
acpi_os_printf("[%u] ", (u32)thread_id);
}
- acpi_os_printf("[%02ld] ", acpi_gbl_nesting_level);
-#endif
+ fill_count = 48 - acpi_gbl_nesting_level -
+ strlen(acpi_ut_trim_function_name(function_name));
+ if (fill_count < 0) {
+ fill_count = 0;
+ }
+
+ acpi_os_printf("[%02ld] %*s",
+ acpi_gbl_nesting_level, acpi_gbl_nesting_level + 1, " ");
+ acpi_os_printf("%s%*s: ",
+ acpi_ut_trim_function_name(function_name), fill_count,
+ " ");
+#else
acpi_os_printf("%-22.22s: ", acpi_ut_trim_function_name(function_name));
+#endif
va_start(args, format);
acpi_os_vprintf(format, args);
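A worked example of the new alignment arithmetic: with nesting level 3 and
a 9-character trimmed function name (say "ns_lookup", a hypothetical):

    fill_count = 48 - 3 - 9 = 36

The two printf calls then emit "[03]", an indent of nesting_level + 1
columns, the function name, and fill_count spaces before ": ". Because the
indent grows by one column per nesting level while fill_count shrinks by
one, the message column stays fixed regardless of depth, until fill_count
clamps to zero for very deep nesting or very long names.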
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 02cd2c2..55debba 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -395,11 +395,6 @@ const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
return (acpi_gbl_ref_class_names[object->reference.class]);
}
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-/*
- * Strings and procedures used for debug only
- */
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_mutex_name
@@ -433,6 +428,12 @@ const char *acpi_ut_get_mutex_name(u32 mutex_id)
return (acpi_gbl_mutex_names[mutex_id]);
}
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+/*
+ * Strings and procedures used for debug only
+ */
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_notify_name
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index e336818..42388dc 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -182,6 +182,78 @@ acpi_ut_predefined_bios_error(const char *module_name,
/*******************************************************************************
*
+ * FUNCTION: acpi_ut_prefixed_namespace_error
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * prefix_scope - Scope/Path that prefixes the internal path
+ * internal_path - Name or path of the namespace node
+ * lookup_status - Exception code from NS lookup
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print error message with the full pathname constructed this way:
+ *
+ * prefix_scope_node_full_path.externalized_internal_path
+ *
+ * NOTE: 10/2017: Treat the major ns_lookup errors as firmware errors
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_prefixed_namespace_error(const char *module_name,
+ u32 line_number,
+ union acpi_generic_state *prefix_scope,
+ const char *internal_path,
+ acpi_status lookup_status)
+{
+ char *full_path;
+ const char *message;
+
+ /*
+ * Main cases:
+ * 1) Object creation, object must not already exist
+ * 2) Object lookup, object must exist
+ */
+ switch (lookup_status) {
+ case AE_ALREADY_EXISTS:
+
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR);
+ message = "Failure creating";
+ break;
+
+ case AE_NOT_FOUND:
+
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR);
+ message = "Failure looking up";
+ break;
+
+ default:
+
+ acpi_os_printf(ACPI_MSG_ERROR);
+ message = "Failure looking up";
+ break;
+ }
+
+ /* Concatenate the prefix path and the internal path */
+
+ full_path =
+ acpi_ns_build_prefixed_pathname(prefix_scope, internal_path);
+
+ acpi_os_printf("%s [%s], %s", message,
+ full_path ? full_path : "Could not get pathname",
+ acpi_format_exception(lookup_status));
+
+ if (full_path) {
+ ACPI_FREE(full_path);
+ }
+
+ ACPI_MSG_SUFFIX;
+}
+
+#ifdef __OBSOLETE_FUNCTION
+/*******************************************************************************
+ *
* FUNCTION: acpi_ut_namespace_error
*
* PARAMETERS: module_name - Caller's module name (for error output)
@@ -240,6 +312,7 @@ acpi_ut_namespace_error(const char *module_name,
ACPI_MSG_SUFFIX;
ACPI_MSG_REDIRECT_END;
}
+#endif
/*******************************************************************************
*
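Given the format string above and the new ACPI_MSG_BIOS_ERROR text from
acutils.h, a failed lookup now renders along these lines (the pathname and
the module/line suffix appended by ACPI_MSG_SUFFIX are illustrative):

    Firmware Error (ACPI): Failure looking up [\_SB.PCI0.LNKA],
    AE_NOT_FOUND (20170831/dswload-210)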
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 23e766d..45eeb0d 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -206,7 +206,6 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_next_owner_id_offset = 0;
acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
acpi_gbl_osi_mutex = NULL;
- acpi_gbl_max_loop_iterations = ACPI_MAX_LOOP_COUNT;
/* Hardware oriented */
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 5f9c680..2055a85 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -134,7 +134,7 @@ acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
if ((count & 63) >= 32) {
operand_ovl.part.hi = operand_ovl.part.lo;
- operand_ovl.part.lo ^= operand_ovl.part.lo;
+ operand_ovl.part.lo = 0;
count = (count & 63) - 32;
}
ACPI_SHIFT_LEFT_64_BY_32(operand_ovl.part.hi,
@@ -171,7 +171,7 @@ acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result)
if ((count & 63) >= 32) {
operand_ovl.part.lo = operand_ovl.part.hi;
- operand_ovl.part.hi ^= operand_ovl.part.hi;
+ operand_ovl.part.hi = 0;
count = (count & 63) - 32;
}
ACPI_SHIFT_RIGHT_64_BY_32(operand_ovl.part.hi,
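For context, the pattern these helpers implement on toolchains without
native 64-bit shifts, as a minimal standalone sketch. The overlay type is
simplified (ACPICA's real overlay is endian-aware; little-endian layout is
assumed here):

    typedef union {
        u64 full;
        struct { u32 lo; u32 hi; } part;
    } u64_overlay;

    /* Left shift by count in [32, 63]: the low word moves into the high
     * word and the low word becomes zero (the explicit "= 0" replaces the
     * old, obscure "lo ^= lo"); only count - 32 bits remain to shift */
    void shift_left_64(u64_overlay *v, u32 count)
    {
        count &= 63;            /* shifts of 64+ bits are undefined */
        if (count >= 32) {
            v->part.hi = v->part.lo;    /* a shift by exactly 32 */
            v->part.lo = 0;
            count -= 32;
        }
        v->part.hi = (v->part.hi << count) |
                     (count ? (v->part.lo >> (32 - count)) : 0);
        v->part.lo <<= count;
    }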
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 5863547..524ba93 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -286,8 +286,9 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
} else {
ACPI_EXCEPTION((AE_INFO, status,
- "Thread %u could not acquire Mutex [0x%X]",
- (u32)this_thread_id, mutex_id));
+ "Thread %u could not acquire Mutex [%s] (0x%X)",
+ (u32)this_thread_id,
+ acpi_ut_get_mutex_name(mutex_id), mutex_id));
}
return (status);
@@ -322,8 +323,8 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
*/
if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) {
ACPI_ERROR((AE_INFO,
- "Mutex [0x%X] is not acquired, cannot release",
- mutex_id));
+ "Mutex [%s] (0x%X) is not acquired, cannot release",
+ acpi_ut_get_mutex_name(mutex_id), mutex_id));
return (AE_NOT_ACQUIRED);
}
diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c
index 7926649..33a0970 100644
--- a/drivers/acpi/acpica/utnonansi.c
+++ b/drivers/acpi/acpica/utnonansi.c
@@ -140,7 +140,7 @@ int acpi_ut_stricmp(char *string1, char *string2)
return (c1 - c2);
}
-#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION) || defined (ACPI_DEBUG_OUTPUT)
/*******************************************************************************
*
* FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
@@ -199,4 +199,13 @@ acpi_ut_safe_strncat(char *dest,
strncat(dest, source, max_transfer_length);
return (FALSE);
}
+
+void acpi_ut_safe_strncpy(char *dest, char *source, acpi_size dest_size)
+{
+ /* Always terminate destination string */
+
+ strncpy(dest, source, dest_size);
+ dest[dest_size - 1] = 0;
+}
+
#endif
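A minimal illustration of why the new wrapper exists: plain strncpy() does
not null-terminate on truncation, while acpi_ut_safe_strncpy() always
does. The buffer size is arbitrary:

    char buf[8];

    strncpy(buf, "a_very_long_name", sizeof(buf));
    /* buf holds "a_very_l" with no trailing '\0'; using it as a
     * C string reads past the end of the buffer */

    acpi_ut_safe_strncpy(buf, "a_very_long_name", sizeof(buf));
    /* buf holds "a_very_" plus '\0': truncated but always terminated */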
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 3175b13..f6b8dd2 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -101,6 +101,8 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
{"Windows 2012", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8 and Server 2012 - Added 08/2012 */
{"Windows 2013", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8.1 and Server 2012 R2 - Added 01/2014 */
{"Windows 2015", NULL, 0, ACPI_OSI_WIN_10}, /* Windows 10 - Added 03/2015 */
+ {"Windows 2016", NULL, 0, ACPI_OSI_WIN_10_RS1}, /* Windows 10 version 1607 - Added 12/2017 */
+ {"Windows 2017", NULL, 0, ACPI_OSI_WIN_10_RS2}, /* Windows 10 version 1703 - Added 12/2017 */
/* Feature Group Strings */
diff --git a/drivers/acpi/acpica/utstrsuppt.c b/drivers/acpi/acpica/utstrsuppt.c
index 965fb5c..97f48d71 100644
--- a/drivers/acpi/acpica/utstrsuppt.c
+++ b/drivers/acpi/acpica/utstrsuppt.c
@@ -52,10 +52,9 @@ static acpi_status
acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit);
static acpi_status
-acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product);
+acpi_ut_strtoul_multiply64(u64 multiplicand, u32 base, u64 *out_product);
-static acpi_status
-acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum);
+static acpi_status acpi_ut_strtoul_add64(u64 addend1, u32 digit, u64 *out_sum);
/*******************************************************************************
*
@@ -357,7 +356,7 @@ acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit)
* FUNCTION: acpi_ut_strtoul_multiply64
*
* PARAMETERS: multiplicand - Current accumulated converted integer
- * multiplier - Base/Radix
+ * base - Base/Radix
* out_product - Where the product is returned
*
* RETURN: Status and 64-bit product
@@ -369,33 +368,40 @@ acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit)
******************************************************************************/
static acpi_status
-acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
+acpi_ut_strtoul_multiply64(u64 multiplicand, u32 base, u64 *out_product)
{
- u64 val;
+ u64 product;
+ u64 quotient;
/* Exit if either operand is zero */
*out_product = 0;
- if (!multiplicand || !multiplier) {
+ if (!multiplicand || !base) {
return (AE_OK);
}
- /* Check for 64-bit overflow before the actual multiplication */
-
- acpi_ut_short_divide(ACPI_UINT64_MAX, (u32)multiplier, &val, NULL);
- if (multiplicand > val) {
+ /*
+ * Check for 64-bit overflow before the actual multiplication.
+ *
+ * Notes: 64-bit division is often not supported on 32-bit platforms
+	 * (it requires a library function), so ACPICA has a local
+	 * 64-bit divide function. Also, Base is currently only used
+	 * as the radix (8/10/16), so the 64/32 divide will always work.
+ */
+ acpi_ut_short_divide(ACPI_UINT64_MAX, base, &quotient, NULL);
+ if (multiplicand > quotient) {
return (AE_NUMERIC_OVERFLOW);
}
- val = multiplicand * multiplier;
+ product = multiplicand * base;
/* Check for 32-bit overflow if necessary */
- if ((acpi_gbl_integer_bit_width == 32) && (val > ACPI_UINT32_MAX)) {
+ if ((acpi_gbl_integer_bit_width == 32) && (product > ACPI_UINT32_MAX)) {
return (AE_NUMERIC_OVERFLOW);
}
- *out_product = val;
+ *out_product = product;
return (AE_OK);
}
@@ -404,7 +410,7 @@ acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
* FUNCTION: acpi_ut_strtoul_add64
*
* PARAMETERS: addend1 - Current accumulated converted integer
- * addend2 - New hex value/char
+ * digit - New hex value/char
* out_sum - Where sum is returned (Accumulator)
*
* RETURN: Status and 64-bit sum
@@ -415,17 +421,17 @@ acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
*
******************************************************************************/
-static acpi_status acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum)
+static acpi_status acpi_ut_strtoul_add64(u64 addend1, u32 digit, u64 *out_sum)
{
u64 sum;
/* Check for 64-bit overflow before the actual addition */
- if ((addend1 > 0) && (addend2 > (ACPI_UINT64_MAX - addend1))) {
+ if ((addend1 > 0) && (digit > (ACPI_UINT64_MAX - addend1))) {
return (AE_NUMERIC_OVERFLOW);
}
- sum = addend1 + addend2;
+ sum = addend1 + digit;
/* Check for 32-bit overflow if necessary */
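In isolation, the divide-before-multiply guard used above looks like this
(values chosen to trigger the overflow branch; acc stands in for the
accumulated conversion result):

    u64 acc = 0x2000000000000000;   /* accumulated value so far      */
    u32 base = 16;                  /* radix: always 8, 10, or 16    */
    u64 quotient;

    /* The 64/32 divide is always available via the local helper */
    acpi_ut_short_divide(ACPI_UINT64_MAX, base, &quotient, NULL);
    if (acc > quotient) {
        return (AE_NUMERIC_OVERFLOW);   /* acc * base would wrap */
    }
    acc *= base;                        /* now provably safe */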
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 3c8de88..633b4e2 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -402,8 +402,8 @@ acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation,
allocation->component = component;
allocation->line = line;
- strncpy(allocation->module, module, ACPI_MAX_MODULE_NAME);
- allocation->module[ACPI_MAX_MODULE_NAME - 1] = 0;
+ acpi_ut_safe_strncpy(allocation->module, (char *)module,
+ ACPI_MAX_MODULE_NAME);
if (!element) {
@@ -717,7 +717,7 @@ exit:
if (!num_outstanding) {
ACPI_INFO(("No outstanding allocations"));
} else {
- ACPI_ERROR((AE_INFO, "%u(0x%X) Outstanding allocations",
+ ACPI_ERROR((AE_INFO, "%u (0x%X) Outstanding cache allocations",
num_outstanding, num_outstanding));
}
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 950a1e5..9da4f8e 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -96,8 +96,8 @@ ACPI_EXPORT_SYMBOL(acpi_error)
*
* RETURN: None
*
- * DESCRIPTION: Print "ACPI Exception" message with module/line/version info
- * and decoded acpi_status.
+ * DESCRIPTION: Print an "ACPI Error" message with module/line/version
+ * info as well as decoded acpi_status.
*
******************************************************************************/
void ACPI_INTERNAL_VAR_XFACE
@@ -111,10 +111,10 @@ acpi_exception(const char *module_name,
/* For AE_OK, just print the message */
if (ACPI_SUCCESS(status)) {
- acpi_os_printf(ACPI_MSG_EXCEPTION);
+ acpi_os_printf(ACPI_MSG_ERROR);
} else {
- acpi_os_printf(ACPI_MSG_EXCEPTION "%s, ",
+ acpi_os_printf(ACPI_MSG_ERROR "%s, ",
acpi_format_exception(status));
}
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 6742f6c..9bff853 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -1007,7 +1007,7 @@ skip:
/* The record may be cleared by others, try read next record */
if (len == -ENOENT)
goto skip;
- else if (len < sizeof(*rcd)) {
+ else if (len < 0 || len < sizeof(*rcd)) {
rc = -EIO;
goto out;
}
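
The added len < 0 test matters because sizeof(*rcd) has the unsigned type size_t: in a mixed comparison the signed length converts to unsigned, so a negative error code becomes a huge positive value and sails past the size check. A standalone illustration of the pitfall:

#include <stdio.h>

int main(void)
{
	long len = -5;	/* pretend a read returned an error code */

	if ((unsigned long)len < sizeof(long))	/* what `len < sizeof(...)` really does */
		printf("never printed: -5 wrapped to a huge unsigned value\n");
	if (len < 0 || (unsigned long)len < sizeof(long))
		printf("caught: failed or short read\n");
	return 0;
}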
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 6402f7f..16c4a10 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -414,6 +414,51 @@ static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int
#endif
}
+/*
+ * PCIe AER errors need to be sent to the AER driver for reporting and
+ * recovery. The GHES severities map to the following AER severities and
+ * require the following handling:
+ *
+ * GHES_SEV_CORRECTABLE -> AER_CORRECTABLE
+ * These need to be reported by the AER driver but no recovery is
+ * necessary.
+ * GHES_SEV_RECOVERABLE -> AER_NONFATAL
+ * GHES_SEV_RECOVERABLE && CPER_SEC_RESET -> AER_FATAL
+ * These both need to be reported and recovered from by the AER driver.
+ * GHES_SEV_PANIC does not make it to this handling since the kernel must
+ * panic.
+ */
+static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
+{
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+ struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
+
+ if (pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
+ pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
+ unsigned int devfn;
+ int aer_severity;
+
+ devfn = PCI_DEVFN(pcie_err->device_id.device,
+ pcie_err->device_id.function);
+ aer_severity = cper_severity_to_aer(gdata->error_severity);
+
+ /*
+ * If firmware reset the component to contain
+ * the error, we must reinitialize it before
+ * use, so treat it as a fatal AER error.
+ */
+ if (gdata->flags & CPER_SEC_RESET)
+ aer_severity = AER_FATAL;
+
+ aer_recover_queue(pcie_err->device_id.segment,
+ pcie_err->device_id.bus,
+ devfn, aer_severity,
+ (struct aer_capability_regs *)
+ pcie_err->aer_info);
+ }
+#endif
+}
+
static void ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
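
Condensed, the severity translation described above is a two-step decision: derive the AER severity from the CPER/GHES severity, then upgrade to fatal whenever firmware reset the device. A hedged sketch with illustrative names (the kernel's actual mapping is done by cper_severity_to_aer()):

#include <stdbool.h>

enum example_sev { EX_CORRECTABLE, EX_NONFATAL, EX_FATAL };

/* corrected: error was corrected by hardware; was_reset: CPER_SEC_RESET set */
static enum example_sev example_aer_severity(bool corrected, bool was_reset)
{
	if (was_reset)
		return EX_FATAL;	/* device must be reinitialized before use */
	return corrected ? EX_CORRECTABLE : EX_NONFATAL;
}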
@@ -441,38 +486,9 @@ static void ghes_do_proc(struct ghes *ghes,
arch_apei_report_mem_error(sev, mem_err);
ghes_handle_memory_failure(gdata, sev);
}
-#ifdef CONFIG_ACPI_APEI_PCIEAER
else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
- struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
-
- if (sev == GHES_SEV_RECOVERABLE &&
- sec_sev == GHES_SEV_RECOVERABLE &&
- pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
- pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
- unsigned int devfn;
- int aer_severity;
-
- devfn = PCI_DEVFN(pcie_err->device_id.device,
- pcie_err->device_id.function);
- aer_severity = cper_severity_to_aer(gdata->error_severity);
-
- /*
- * If firmware reset the component to contain
- * the error, we must reinitialize it before
- * use, so treat it as a fatal AER error.
- */
- if (gdata->flags & CPER_SEC_RESET)
- aer_severity = AER_FATAL;
-
- aer_recover_queue(pcie_err->device_id.segment,
- pcie_err->device_id.bus,
- devfn, aer_severity,
- (struct aer_capability_regs *)
- pcie_err->aer_info);
- }
-
+ ghes_handle_aer(gdata);
}
-#endif
else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
@@ -870,7 +886,6 @@ static void ghes_print_queued_estatus(void)
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
struct acpi_hest_generic_status *estatus;
- u32 len, node_len;
llnode = llist_del_all(&ghes_estatus_llist);
/*
@@ -882,8 +897,6 @@ static void ghes_print_queued_estatus(void)
estatus_node = llist_entry(llnode, struct ghes_estatus_node,
llnode);
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
- len = cper_estatus_len(estatus);
- node_len = GHES_ESTATUS_NODE_LEN(len);
generic = estatus_node->generic;
ghes_print_estatus(NULL, generic, estatus);
llnode = llnode->next;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 13e7b56..19bc440 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -70,6 +70,7 @@ static async_cookie_t async_cookie;
static bool battery_driver_registered;
static int battery_bix_broken_package;
static int battery_notification_delay_ms;
+static int battery_full_discharging;
static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -214,9 +215,12 @@ static int acpi_battery_get_property(struct power_supply *psy,
return -ENODEV;
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
- if (battery->state & ACPI_BATTERY_STATE_DISCHARGING)
- val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
- else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
+ if (battery->state & ACPI_BATTERY_STATE_DISCHARGING) {
+ if (battery_full_discharging && battery->rate_now == 0)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ } else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (acpi_battery_is_charged(battery))
val->intval = POWER_SUPPLY_STATUS_FULL;
@@ -1166,6 +1170,12 @@ battery_notification_delay_quirk(const struct dmi_system_id *d)
return 0;
}
+static int __init battery_full_discharging_quirk(const struct dmi_system_id *d)
+{
+ battery_full_discharging = 1;
+ return 0;
+}
+
static const struct dmi_system_id bat_dmi_table[] __initconst = {
{
.callback = battery_bix_broken_package_quirk,
@@ -1183,6 +1193,22 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
},
},
+ {
+ .callback = battery_full_discharging_quirk,
+ .ident = "ASUS GL502VSK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GL502VSK"),
+ },
+ },
+ {
+ .callback = battery_full_discharging_quirk,
+ .ident = "ASUS UX305LA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX305LA"),
+ },
+ },
{},
};
@@ -1237,13 +1263,11 @@ static int acpi_battery_add(struct acpi_device *device)
#ifdef CONFIG_ACPI_PROCFS_POWER
result = acpi_battery_add_fs(device);
-#endif
if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_remove_fs(device);
-#endif
goto fail;
}
+#endif
printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index bf8e4d3..e1eee7a 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -30,6 +30,7 @@
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/dmi.h>
#include <acpi/button.h>
#define PREFIX "ACPI: "
@@ -76,6 +77,22 @@ static const struct acpi_device_id button_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, button_device_ids);
+/*
+ * Some devices that don't have a lid at all have a broken _LID
+ * method (e.g. pointing to a floating gpio pin), causing spurious LID events.
+ */
+static const struct dmi_system_id lid_blacklist[] = {
+ {
+ /* GP-electronic T701 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T701"),
+ DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"),
+ },
+ },
+ {}
+};
+
static int acpi_button_add(struct acpi_device *device);
static int acpi_button_remove(struct acpi_device *device);
static void acpi_button_notify(struct acpi_device *device, u32 event);
@@ -210,6 +227,8 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
}
/* Send the platform triggered reliable event */
if (do_update) {
+ acpi_handle_debug(device->handle, "ACPI LID %s\n",
+ state ? "open" : "closed");
input_report_switch(button->input, SW_LID, !state);
input_sync(button->input);
button->last_state = !!state;
@@ -473,6 +492,9 @@ static int acpi_button_add(struct acpi_device *device)
char *name, *class;
int error;
+ if (!strcmp(hid, ACPI_BUTTON_HID_LID) && dmi_check_system(lid_blacklist))
+ return -ENODEV;
+
button = kzalloc(sizeof(struct acpi_button), GFP_KERNEL);
if (!button)
return -ENOMEM;
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 30e84cc..06ea474 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1171,7 +1171,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
struct cpc_register_resource *desired_reg;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
- struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+ struct cppc_pcc_data *pcc_ss_data;
int ret = 0;
if (!cpc_desc || pcc_ss_id < 0) {
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index e4ffaee..c4d0a1c 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -990,7 +990,7 @@ void acpi_subsys_complete(struct device *dev)
* the sleep state it is going out of and it has never been resumed till
* now, resume it in case the firmware powered it up.
*/
- if (dev->power.direct_complete && pm_resume_via_firmware())
+ if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
pm_request_resume(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_complete);
@@ -1039,10 +1039,28 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
*/
int acpi_subsys_suspend_noirq(struct device *dev)
{
- if (dev_pm_smart_suspend_and_suspended(dev))
+ int ret;
+
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ dev->power.may_skip_resume = true;
return 0;
+ }
+
+ ret = pm_generic_suspend_noirq(dev);
+ if (ret)
+ return ret;
+
+ /*
+ * If the target system sleep state is suspend-to-idle, it is sufficient
+ * to check whether or not the device's wakeup settings are good for
+ * runtime PM. Otherwise, the pm_resume_via_firmware() check will cause
+ * acpi_subsys_complete() to take care of fixing up the device's state
+ * anyway, if need be.
+ */
+ dev->power.may_skip_resume = device_may_wakeup(dev) ||
+ !device_can_wakeup(dev);
- return pm_generic_suspend_noirq(dev);
+ return 0;
}
EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
@@ -1052,6 +1070,9 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
*/
int acpi_subsys_resume_noirq(struct device *dev)
{
+ if (dev_pm_may_skip_resume(dev))
+ return 0;
+
/*
* Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
* during system suspend, so update their runtime PM status to "active"
@@ -1138,7 +1159,7 @@ int acpi_subsys_thaw_noirq(struct device *dev)
* skip all of the subsequent "thaw" callbacks for the device.
*/
if (dev_pm_smart_suspend_and_suspended(dev)) {
- dev->power.direct_complete = true;
+ dev_pm_skip_next_resume_phases(dev);
return 0;
}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 0252c9b..d9f38c6 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1516,7 +1516,7 @@ static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
}
acpi_handle_info(ec->handle,
- "GPE=0x%lx, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
+ "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
ec->gpe, ec->command_addr, ec->data_addr);
return ret;
}
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 6c7dd7a..dd70d6c 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -128,7 +128,7 @@ static int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
return -ENOMEM;
}
- if (!debugfs_create_x32("gpe", 0444, dev_dir, (u32 *)&first_ec->gpe))
+ if (!debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe))
goto error;
if (!debugfs_create_bool("use_global_lock", 0444, dev_dir,
&first_ec->global_lock))
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index 46f0603..f13ba2c 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -49,6 +49,11 @@
#define MODULE_NAME "acpi-ged"
+struct acpi_ged_device {
+ struct device *dev;
+ struct list_head event_list;
+};
+
struct acpi_ged_event {
struct list_head node;
struct device *dev;
@@ -76,7 +81,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
unsigned int irq;
unsigned int gsi;
unsigned int irqflags = IRQF_ONESHOT;
- struct device *dev = context;
+ struct acpi_ged_device *geddev = context;
+ struct device *dev = geddev->dev;
acpi_handle handle = ACPI_HANDLE(dev);
acpi_handle evt_handle;
struct resource r;
@@ -102,8 +108,6 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
return AE_ERROR;
}
- dev_info(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);
-
event = devm_kzalloc(dev, sizeof(*event), GFP_KERNEL);
if (!event)
return AE_ERROR;
@@ -116,29 +120,58 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
if (r.flags & IORESOURCE_IRQ_SHAREABLE)
irqflags |= IRQF_SHARED;
- if (devm_request_threaded_irq(dev, irq, NULL, acpi_ged_irq_handler,
- irqflags, "ACPI:Ged", event)) {
+ if (request_threaded_irq(irq, NULL, acpi_ged_irq_handler,
+ irqflags, "ACPI:Ged", event)) {
dev_err(dev, "failed to setup event handler for irq %u\n", irq);
return AE_ERROR;
}
+ dev_dbg(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);
+ list_add_tail(&event->node, &geddev->event_list);
return AE_OK;
}
static int ged_probe(struct platform_device *pdev)
{
+ struct acpi_ged_device *geddev;
acpi_status acpi_ret;
+ geddev = devm_kzalloc(&pdev->dev, sizeof(*geddev), GFP_KERNEL);
+ if (!geddev)
+ return -ENOMEM;
+
+ geddev->dev = &pdev->dev;
+ INIT_LIST_HEAD(&geddev->event_list);
acpi_ret = acpi_walk_resources(ACPI_HANDLE(&pdev->dev), "_CRS",
- acpi_ged_request_interrupt, &pdev->dev);
+ acpi_ged_request_interrupt, geddev);
if (ACPI_FAILURE(acpi_ret)) {
dev_err(&pdev->dev, "unable to parse the _CRS record\n");
return -EINVAL;
}
+ platform_set_drvdata(pdev, geddev);
return 0;
}
+static void ged_shutdown(struct platform_device *pdev)
+{
+ struct acpi_ged_device *geddev = platform_get_drvdata(pdev);
+ struct acpi_ged_event *event, *next;
+
+ list_for_each_entry_safe(event, next, &geddev->event_list, node) {
+ free_irq(event->irq, event);
+ list_del(&event->node);
+ dev_dbg(geddev->dev, "GED releasing GSI %u @ IRQ %u\n",
+ event->gsi, event->irq);
+ }
+}
+
+static int ged_remove(struct platform_device *pdev)
+{
+ ged_shutdown(pdev);
+ return 0;
+}
+
static const struct acpi_device_id ged_acpi_ids[] = {
{"ACPI0013"},
{},
@@ -146,6 +179,8 @@ static const struct acpi_device_id ged_acpi_ids[] = {
static struct platform_driver ged_driver = {
.probe = ged_probe,
+ .remove = ged_remove,
+ .shutdown = ged_shutdown,
.driver = {
.name = MODULE_NAME,
.acpi_match_table = ACPI_PTR(ged_acpi_ids),
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 7f43423..1d0a501 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -159,7 +159,7 @@ static inline void acpi_early_processor_osc(void) {}
-------------------------------------------------------------------------- */
struct acpi_ec {
acpi_handle handle;
- unsigned long gpe;
+ u32 gpe;
unsigned long command_addr;
unsigned long data_addr;
bool global_lock;
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index ff2580e..abeb4df 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1670,6 +1670,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
dev_name(&adev_dimm->dev));
return -ENXIO;
}
+ /*
+ * Record nfit_mem for the notification path to track back to
+ * the nfit sysfs attributes for this dimm device object.
+ */
+ dev_set_drvdata(&adev_dimm->dev, nfit_mem);
/*
* Until standardization materializes we need to consider 4
@@ -1752,9 +1757,11 @@ static void shutdown_dimm_notify(void *data)
sysfs_put(nfit_mem->flags_attr);
nfit_mem->flags_attr = NULL;
}
- if (adev_dimm)
+ if (adev_dimm) {
acpi_remove_notify_handler(adev_dimm->handle,
ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
+ dev_set_drvdata(&adev_dimm->dev, NULL);
+ }
}
mutex_unlock(&acpi_desc->init_mutex);
}
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 917f1cc..8ccaae3 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -460,8 +460,7 @@ int __init acpi_numa_init(void)
srat_proc, ARRAY_SIZE(srat_proc), 0);
cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
- acpi_parse_memory_affinity,
- NR_NODE_MEMBLKS);
+ acpi_parse_memory_affinity, 0);
}
/* SLIT: System Locality Information Table */
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index bc3d914..85ad679 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -612,7 +612,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
acpi_isa_irq_penalty[link->irq.active] +=
PIRQ_PENALTY_PCI_USING;
- printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
+ pr_info("%s [%s] enabled at IRQ %d\n",
acpi_device_name(link->device),
acpi_device_bid(link->device), link->irq.active);
}
diff --git a/drivers/acpi/pmic/intel_pmic_bxtwc.c b/drivers/acpi/pmic/intel_pmic_bxtwc.c
index 90011aa..886ac8b 100644
--- a/drivers/acpi/pmic/intel_pmic_bxtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_bxtwc.c
@@ -400,7 +400,7 @@ static int intel_bxtwc_pmic_opregion_probe(struct platform_device *pdev)
&intel_bxtwc_pmic_opregion_data);
}
-static struct platform_device_id bxt_wc_opregion_id_table[] = {
+static const struct platform_device_id bxt_wc_opregion_id_table[] = {
{ .name = "bxt_wcove_region" },
{},
};
@@ -412,9 +412,4 @@ static struct platform_driver intel_bxtwc_pmic_opregion_driver = {
},
.id_table = bxt_wc_opregion_id_table,
};
-
-static int __init intel_bxtwc_pmic_opregion_driver_init(void)
-{
- return platform_driver_register(&intel_bxtwc_pmic_opregion_driver);
-}
-device_initcall(intel_bxtwc_pmic_opregion_driver_init);
+builtin_platform_driver(intel_bxtwc_pmic_opregion_driver);
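
builtin_platform_driver() generates exactly the boilerplate deleted above; via builtin_driver() it expands to roughly the following, so the registration behavior is unchanged (names illustrative):

/* Rough expansion of builtin_platform_driver(drv) */
static int __init drv_init(void)
{
	return platform_driver_register(&drv);
}
device_initcall(drv_init);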
diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
index 109c1e9..f6d73a2 100644
--- a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
+++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
@@ -131,7 +131,4 @@ static struct platform_driver chtdc_ti_pmic_opregion_driver = {
},
.id_table = chtdc_ti_pmic_opregion_id_table,
};
-module_platform_driver(chtdc_ti_pmic_opregion_driver);
-
-MODULE_DESCRIPTION("Dollar Cove TI PMIC opregion driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(chtdc_ti_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_chtwc.c b/drivers/acpi/pmic/intel_pmic_chtwc.c
index 85636d7..9912422 100644
--- a/drivers/acpi/pmic/intel_pmic_chtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_chtwc.c
@@ -260,11 +260,10 @@ static int intel_cht_wc_pmic_opregion_probe(struct platform_device *pdev)
&intel_cht_wc_pmic_opregion_data);
}
-static struct platform_device_id cht_wc_opregion_id_table[] = {
+static const struct platform_device_id cht_wc_opregion_id_table[] = {
{ .name = "cht_wcove_region" },
{},
};
-MODULE_DEVICE_TABLE(platform, cht_wc_opregion_id_table);
static struct platform_driver intel_cht_wc_pmic_opregion_driver = {
.probe = intel_cht_wc_pmic_opregion_probe,
@@ -273,8 +272,4 @@ static struct platform_driver intel_cht_wc_pmic_opregion_driver = {
},
.id_table = cht_wc_opregion_id_table,
};
-module_platform_driver(intel_cht_wc_pmic_opregion_driver);
-
-MODULE_DESCRIPTION("Intel CHT Whiskey Cove PMIC operation region driver");
-MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(intel_cht_wc_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_crc.c b/drivers/acpi/pmic/intel_pmic_crc.c
index d7f1761..7ffa740 100644
--- a/drivers/acpi/pmic/intel_pmic_crc.c
+++ b/drivers/acpi/pmic/intel_pmic_crc.c
@@ -201,9 +201,4 @@ static struct platform_driver intel_crc_pmic_opregion_driver = {
.name = "crystal_cove_pmic",
},
};
-
-static int __init intel_crc_pmic_opregion_driver_init(void)
-{
- return platform_driver_register(&intel_crc_pmic_opregion_driver);
-}
-device_initcall(intel_crc_pmic_opregion_driver_init);
+builtin_platform_driver(intel_crc_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 6c99d3f..316e551 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -278,9 +278,4 @@ static struct platform_driver intel_xpower_pmic_opregion_driver = {
.name = "axp288_pmic_acpi",
},
};
-
-static int __init intel_xpower_pmic_opregion_driver_init(void)
-{
- return platform_driver_register(&intel_xpower_pmic_opregion_driver);
-}
-device_initcall(intel_xpower_pmic_opregion_driver_init);
+builtin_platform_driver(intel_xpower_pmic_opregion_driver);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d50a7b6..5f0071c 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -207,6 +207,7 @@ static void tsc_check_state(int state)
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
case X86_VENDOR_INTEL:
+ case X86_VENDOR_CENTAUR:
/*
* AMD Fam10h TSC will tick in all
* C/P/S0/S1 states when this bit is set.
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 8082871..46cde091 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -367,10 +367,20 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
{},
};
+static bool ignore_blacklist;
+
+void __init acpi_sleep_no_blacklist(void)
+{
+ ignore_blacklist = true;
+}
+
static void __init acpi_sleep_dmi_check(void)
{
int year;
+ if (ignore_blacklist)
+ return;
+
if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year >= 2012)
acpi_nvs_nosave_s3();
@@ -697,7 +707,8 @@ static const struct acpi_device_id lps0_device_ids[] = {
#define ACPI_LPS0_ENTRY 5
#define ACPI_LPS0_EXIT 6
-#define ACPI_S2IDLE_FUNC_MASK ((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT))
+#define ACPI_LPS0_SCREEN_MASK ((1 << ACPI_LPS0_SCREEN_OFF) | (1 << ACPI_LPS0_SCREEN_ON))
+#define ACPI_LPS0_PLATFORM_MASK ((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT))
static acpi_handle lps0_device_handle;
static guid_t lps0_dsm_guid;
@@ -900,7 +911,8 @@ static int lps0_device_attach(struct acpi_device *adev,
if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
char bitmask = *(char *)out_obj->buffer.pointer;
- if ((bitmask & ACPI_S2IDLE_FUNC_MASK) == ACPI_S2IDLE_FUNC_MASK) {
+ if ((bitmask & ACPI_LPS0_PLATFORM_MASK) == ACPI_LPS0_PLATFORM_MASK ||
+ (bitmask & ACPI_LPS0_SCREEN_MASK) == ACPI_LPS0_SCREEN_MASK) {
lps0_dsm_func_mask = bitmask;
lps0_device_handle = adev->handle;
/*
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 06a150b..4fc59c3 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -816,14 +816,8 @@ end:
* interface:
* echo unmask > /sys/firmware/acpi/interrupts/gpe00
*/
-
-/*
- * Currently, the GPE flooding prevention only supports to mask the GPEs
- * numbered from 00 to 7f.
- */
-#define ACPI_MASKABLE_GPE_MAX 0x80
-
-static u64 __initdata acpi_masked_gpes;
+#define ACPI_MASKABLE_GPE_MAX 0xFF
+static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
static int __init acpi_gpe_set_masked_gpes(char *val)
{
@@ -831,7 +825,7 @@ static int __init acpi_gpe_set_masked_gpes(char *val)
if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX)
return -EINVAL;
- acpi_masked_gpes |= ((u64)1<<gpe);
+ set_bit(gpe, acpi_masked_gpes_map);
return 1;
}
@@ -843,15 +837,11 @@ void __init acpi_gpe_apply_masked_gpes(void)
acpi_status status;
u8 gpe;
- for (gpe = 0;
- gpe < min_t(u8, ACPI_MASKABLE_GPE_MAX, acpi_current_gpe_count);
- gpe++) {
- if (acpi_masked_gpes & ((u64)1<<gpe)) {
- status = acpi_get_gpe_device(gpe, &handle);
- if (ACPI_SUCCESS(status)) {
- pr_info("Masking GPE 0x%x.\n", gpe);
- (void)acpi_mask_gpe(handle, gpe, TRUE);
- }
+ for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
+ status = acpi_get_gpe_device(gpe, &handle);
+ if (ACPI_SUCCESS(status)) {
+ pr_info("Masking GPE 0x%x.\n", gpe);
+ (void)acpi_mask_gpe(handle, gpe, TRUE);
}
}
}
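
Switching from a u64 mask to DECLARE_BITMAP() is what lifts the old ceiling: a single u64 can only represent GPEs 0 through 63, while a bitmap scales to any compile-time size. A kernel-context sketch of the pattern (assumes <linux/bitmap.h> and <linux/printk.h>; not a standalone program):

#include <linux/bitmap.h>
#include <linux/printk.h>

#define DEMO_NBITS 255
static DECLARE_BITMAP(demo_map, DEMO_NBITS);

static void demo_bitmap(void)
{
	unsigned int bit;

	set_bit(0xC8, demo_map);	/* bit 200: unreachable via (u64)1 << gpe */
	for_each_set_bit(bit, demo_map, DEMO_NBITS)
		pr_info("GPE 0x%x masked\n", bit);
}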
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 9d49a1a..78db976 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -737,16 +737,17 @@ bool acpi_dev_found(const char *hid)
}
EXPORT_SYMBOL(acpi_dev_found);
-struct acpi_dev_present_info {
+struct acpi_dev_match_info {
+ const char *dev_name;
struct acpi_device_id hid[2];
const char *uid;
s64 hrv;
};
-static int acpi_dev_present_cb(struct device *dev, void *data)
+static int acpi_dev_match_cb(struct device *dev, void *data)
{
struct acpi_device *adev = to_acpi_device(dev);
- struct acpi_dev_present_info *match = data;
+ struct acpi_dev_match_info *match = data;
unsigned long long hrv;
acpi_status status;
@@ -757,6 +758,8 @@ static int acpi_dev_present_cb(struct device *dev, void *data)
strcmp(adev->pnp.unique_id, match->uid)))
return 0;
+ match->dev_name = acpi_dev_name(adev);
+
if (match->hrv == -1)
return 1;
@@ -789,20 +792,44 @@ static int acpi_dev_present_cb(struct device *dev, void *data)
*/
bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
{
- struct acpi_dev_present_info match = {};
+ struct acpi_dev_match_info match = {};
struct device *dev;
strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
match.uid = uid;
match.hrv = hrv;
- dev = bus_find_device(&acpi_bus_type, NULL, &match,
- acpi_dev_present_cb);
-
+ dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
return !!dev;
}
EXPORT_SYMBOL(acpi_dev_present);
+/**
+ * acpi_dev_get_first_match_name - Return name of first match of ACPI device
+ * @hid: Hardware ID of the device.
+ * @uid: Unique ID of the device, pass NULL to not check _UID
+ * @hrv: Hardware Revision of the device, pass -1 to not check _HRV
+ *
+ * Return device name if a matching device was present
+ * at the moment of invocation, or NULL otherwise.
+ *
+ * See additional information in acpi_dev_present() as well.
+ */
+const char *
+acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
+{
+ struct acpi_dev_match_info match = {};
+ struct device *dev;
+
+ strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
+ match.uid = uid;
+ match.hrv = hrv;
+
+ dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
+ return dev ? match.dev_name : NULL;
+}
+EXPORT_SYMBOL(acpi_dev_get_first_match_name);
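
A typical caller only needs the name of the first matching device; a hedged usage sketch with a made-up _HID:

/* "ABCD0000" is an illustrative _HID, not a real device ID. */
const char *name = acpi_dev_get_first_match_name("ABCD0000", NULL, -1);

if (name)
	pr_info("first matching ACPI device: %s\n", name);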
+
/*
* acpi_backlight= handling, this is done here rather than in video_detect.c
* because __setup cannot be used in modules.
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index bccec9d..a7ecfde 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -482,7 +482,8 @@ enum binder_deferred_state {
* @tsk task_struct for group_leader of process
* (invariant after initialized)
* @files files_struct for process
- * (invariant after initialized)
+ * (protected by @files_lock)
+ * @files_lock mutex to protect @files
* @deferred_work_node: element for binder_deferred_list
* (protected by binder_deferred_lock)
* @deferred_work: bitmap of deferred work to perform
@@ -530,6 +531,7 @@ struct binder_proc {
int pid;
struct task_struct *tsk;
struct files_struct *files;
+ struct mutex files_lock;
struct hlist_node deferred_work_node;
int deferred_work;
bool is_dead;
@@ -877,20 +879,26 @@ static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
- struct files_struct *files = proc->files;
unsigned long rlim_cur;
unsigned long irqs;
+ int ret;
- if (files == NULL)
- return -ESRCH;
-
- if (!lock_task_sighand(proc->tsk, &irqs))
- return -EMFILE;
-
+ mutex_lock(&proc->files_lock);
+ if (proc->files == NULL) {
+ ret = -ESRCH;
+ goto err;
+ }
+ if (!lock_task_sighand(proc->tsk, &irqs)) {
+ ret = -EMFILE;
+ goto err;
+ }
rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
unlock_task_sighand(proc->tsk, &irqs);
- return __alloc_fd(files, 0, rlim_cur, flags);
+ ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
+err:
+ mutex_unlock(&proc->files_lock);
+ return ret;
}
/*
@@ -899,8 +907,10 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
static void task_fd_install(
struct binder_proc *proc, unsigned int fd, struct file *file)
{
+ mutex_lock(&proc->files_lock);
if (proc->files)
__fd_install(proc->files, fd, file);
+ mutex_unlock(&proc->files_lock);
}
/*
@@ -910,9 +920,11 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
int retval;
- if (proc->files == NULL)
- return -ESRCH;
-
+ mutex_lock(&proc->files_lock);
+ if (proc->files == NULL) {
+ retval = -ESRCH;
+ goto err;
+ }
retval = __close_fd(proc->files, fd);
/* can't restart close syscall because file table entry was cleared */
if (unlikely(retval == -ERESTARTSYS ||
@@ -920,7 +932,8 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
retval == -ERESTARTNOHAND ||
retval == -ERESTART_RESTARTBLOCK))
retval = -EINTR;
-
+err:
+ mutex_unlock(&proc->files_lock);
return retval;
}
@@ -4627,7 +4640,9 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
ret = binder_alloc_mmap_handler(&proc->alloc, vma);
if (ret)
return ret;
+ mutex_lock(&proc->files_lock);
proc->files = get_files_struct(current);
+ mutex_unlock(&proc->files_lock);
return 0;
err_bad_arg:
@@ -4651,6 +4666,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
+ mutex_init(&proc->files_lock);
INIT_LIST_HEAD(&proc->todo);
proc->default_priority = task_nice(current);
binder_dev = container_of(filp->private_data, struct binder_device,
@@ -4903,9 +4919,11 @@ static void binder_deferred_func(struct work_struct *work)
files = NULL;
if (defer & BINDER_DEFERRED_PUT_FILES) {
+ mutex_lock(&proc->files_lock);
files = proc->files;
if (files)
proc->files = NULL;
+ mutex_unlock(&proc->files_lock);
}
if (defer & BINDER_DEFERRED_FLUSH)
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
index 80854f7..0ae6971 100644
--- a/drivers/ata/ahci_mtk.c
+++ b/drivers/ata/ahci_mtk.c
@@ -1,5 +1,5 @@
/*
- * MeidaTek AHCI SATA driver
+ * MediaTek AHCI SATA driver
*
* Copyright (c) 2017 MediaTek Inc.
* Author: Ryder Lee <ryder.lee@mediatek.com>
@@ -25,7 +25,7 @@
#include <linux/reset.h>
#include "ahci.h"
-#define DRV_NAME "ahci"
+#define DRV_NAME "ahci-mtk"
#define SYS_CFG 0x14
#define SYS_CFG_SATA_MSK GENMASK(31, 30)
@@ -192,5 +192,5 @@ static struct platform_driver mtk_ahci_driver = {
};
module_platform_driver(mtk_ahci_driver);
-MODULE_DESCRIPTION("MeidaTek SATA AHCI Driver");
+MODULE_DESCRIPTION("MediaTek SATA AHCI Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index b6b0bf7..2685f28 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -35,6 +35,8 @@
/* port register default value */
#define AHCI_PORT_PHY_1_CFG 0xa003fffe
+#define AHCI_PORT_PHY2_CFG 0x28184d1f
+#define AHCI_PORT_PHY3_CFG 0x0e081509
#define AHCI_PORT_TRANS_CFG 0x08000029
#define AHCI_PORT_AXICC_CFG 0x3fffffff
@@ -183,6 +185,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+ writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -190,6 +194,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
case AHCI_LS2080A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+ writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -201,6 +207,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+ writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -212,6 +220,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
writel(readl(qpriv->ecc_addr) | ECC_DIS_LS1088A,
qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+ writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -219,6 +229,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
case AHCI_LS2088A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+ writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 2a88292..3c09122 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3082,13 +3082,19 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
bit = fls(mask) - 1;
mask &= ~(1 << bit);
- /* Mask off all speeds higher than or equal to the current
- * one. Force 1.5Gbps if current SPD is not available.
+ /*
+ * Mask off all speeds higher than or equal to the current one. At
+ * this point, if current SPD is not available and we previously
+ * recorded the link speed from SStatus, the driver has already
+ * masked off the highest bit so mask should already be 1 or 0.
+ * Otherwise, we should not force 1.5Gbps on a link where we have
+ * not previously recorded speed from SStatus. Just return in this
+ * case.
*/
if (spd > 1)
mask &= (1 << (spd - 1)) - 1;
else
- mask &= 1;
+ return -EINVAL;
/* were we already at the bottom? */
if (!mask)
@@ -4443,6 +4449,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
* https://bugzilla.kernel.org/show_bug.cgi?id=121671
*/
{ "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
+ { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
/* Devices we expect to fail diagnostics */
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index ffd8d33..6db2e34 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -82,7 +82,7 @@ static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed
* is issued to the device. However, if the controller clock is 133MHz,
* the following tables must be used.
*/
-static struct pdc2027x_pio_timing {
+static const struct pdc2027x_pio_timing {
u8 value0, value1, value2;
} pdc2027x_pio_timing_tbl[] = {
{ 0xfb, 0x2b, 0xac }, /* PIO mode 0 */
@@ -92,7 +92,7 @@ static struct pdc2027x_pio_timing {
{ 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */
};
-static struct pdc2027x_mdma_timing {
+static const struct pdc2027x_mdma_timing {
u8 value0, value1;
} pdc2027x_mdma_timing_tbl[] = {
{ 0xdf, 0x5f }, /* MDMA mode 0 */
@@ -100,7 +100,7 @@ static struct pdc2027x_mdma_timing {
{ 0x69, 0x25 }, /* MDMA mode 2 */
};
-static struct pdc2027x_udma_timing {
+static const struct pdc2027x_udma_timing {
u8 value0, value1, value2;
} pdc2027x_udma_timing_tbl[] = {
{ 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
@@ -649,7 +649,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
* @host: target ATA host
* @board_idx: board identifier
*/
-static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
+static void pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
{
long pll_clock;
@@ -665,8 +665,6 @@ static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
/* Adjust PLL control register */
pdc_adjust_pll(host, pll_clock, board_idx);
-
- return 0;
}
/**
@@ -753,8 +751,7 @@ static int pdc2027x_init_one(struct pci_dev *pdev,
//pci_enable_intx(pdev);
/* initialize adapter */
- if (pdc_hardware_init(host, board_idx) != 0)
- return -EIO;
+ pdc_hardware_init(host, board_idx);
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
@@ -778,8 +775,7 @@ static int pdc2027x_reinit_one(struct pci_dev *pdev)
else
board_idx = PDC_UDMA_133;
- if (pdc_hardware_init(host, board_idx))
- return -EIO;
+ pdc_hardware_init(host, board_idx);
ata_host_resume(host);
return 0;
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index bdc8790..2415ad9 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -236,6 +236,9 @@ config GENERIC_CPU_DEVICES
config GENERIC_CPU_AUTOPROBE
bool
+config GENERIC_CPU_VULNERABILITIES
+ bool
+
config SOC_BUS
bool
select GLOB
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index eb3af27..07532d8 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -186,6 +186,11 @@ static void cache_associativity(struct cacheinfo *this_leaf)
this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}
+static bool cache_node_is_unified(struct cacheinfo *this_leaf)
+{
+ return of_property_read_bool(this_leaf->of_node, "cache-unified");
+}
+
static void cache_of_override_properties(unsigned int cpu)
{
int index;
@@ -194,6 +199,14 @@ static void cache_of_override_properties(unsigned int cpu)
for (index = 0; index < cache_leaves(cpu); index++) {
this_leaf = this_cpu_ci->info_list + index;
+ /*
+ * init_cache_level must set up the cache level correctly,
+ * overriding the architecturally specified levels, so if the
+ * type is NONE at this stage, it should be unified.
+ */
+ if (this_leaf->type == CACHE_TYPE_NOCACHE &&
+ cache_node_is_unified(this_leaf))
+ this_leaf->type = CACHE_TYPE_UNIFIED;
cache_size(this_leaf);
cache_get_line_size(this_leaf);
cache_nr_sets(this_leaf);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 58a9b60..d990384 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -511,10 +511,58 @@ static void __init cpu_dev_register_generic(void)
#endif
}
+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
+
+ssize_t __weak cpu_show_meltdown(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spectre_v1(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spectre_v2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
+static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+
+static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ &dev_attr_meltdown.attr,
+ &dev_attr_spectre_v1.attr,
+ &dev_attr_spectre_v2.attr,
+ NULL
+};
+
+static const struct attribute_group cpu_root_vulnerabilities_group = {
+ .name = "vulnerabilities",
+ .attrs = cpu_root_vulnerabilities_attrs,
+};
+
+static void __init cpu_register_vulnerabilities(void)
+{
+ if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
+ &cpu_root_vulnerabilities_group))
+ pr_err("Unable to register CPU vulnerabilities\n");
+}
+
+#else
+static inline void cpu_register_vulnerabilities(void) { }
+#endif
+
void __init cpu_dev_init(void)
{
if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
panic("Failed to register CPU subsystem");
cpu_dev_register_generic();
+ cpu_register_vulnerabilities();
}
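
The __weak attribute makes the "Not affected" handlers above pure defaults: an architecture that provides a strong definition of cpu_show_meltdown() (and friends) overrides them at link time. An illustrative override, with a made-up helper standing in for the real mitigation query:

/* Strong definition; replaces the __weak default when linked in. */
ssize_t cpu_show_meltdown(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	if (arch_has_meltdown_mitigation())	/* hypothetical helper */
		return sprintf(buf, "Mitigation: PTI\n");
	return sprintf(buf, "Not affected\n");
}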
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 0c80bea..528b241 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1032,15 +1032,12 @@ static int genpd_prepare(struct device *dev)
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
struct generic_pm_domain *genpd;
- int ret;
+ int ret = 0;
genpd = dev_to_genpd(dev);
if (IS_ERR(genpd))
return -EINVAL;
- if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
- return 0;
-
if (poweroff)
ret = pm_generic_poweroff_noirq(dev);
else
@@ -1048,10 +1045,19 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff)
if (ret)
return ret;
- if (genpd->dev_ops.stop && genpd->dev_ops.start) {
- ret = pm_runtime_force_suspend(dev);
- if (ret)
+ if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
+ return 0;
+
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_stop_dev(genpd, dev);
+ if (ret) {
+ if (poweroff)
+ pm_generic_restore_noirq(dev);
+ else
+ pm_generic_resume_noirq(dev);
return ret;
+ }
}
genpd_lock(genpd);
@@ -1085,7 +1091,7 @@ static int genpd_suspend_noirq(struct device *dev)
static int genpd_resume_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
- int ret = 0;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -1094,21 +1100,21 @@ static int genpd_resume_noirq(struct device *dev)
return -EINVAL;
if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
- return 0;
+ return pm_generic_resume_noirq(dev);
genpd_lock(genpd);
genpd_sync_power_on(genpd, true, 0);
genpd->suspended_count--;
genpd_unlock(genpd);
- if (genpd->dev_ops.stop && genpd->dev_ops.start)
- ret = pm_runtime_force_resume(dev);
-
- ret = pm_generic_resume_noirq(dev);
- if (ret)
- return ret;
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
+ if (ret)
+ return ret;
+ }
- return ret;
+ return pm_generic_resume_noirq(dev);
}
/**
@@ -1135,8 +1141,9 @@ static int genpd_freeze_noirq(struct device *dev)
if (ret)
return ret;
- if (genpd->dev_ops.stop && genpd->dev_ops.start)
- ret = pm_runtime_force_suspend(dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev))
+ ret = genpd_stop_dev(genpd, dev);
return ret;
}
@@ -1159,8 +1166,9 @@ static int genpd_thaw_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->dev_ops.stop && genpd->dev_ops.start) {
- ret = pm_runtime_force_resume(dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
if (ret)
return ret;
}
@@ -1217,8 +1225,9 @@ static int genpd_restore_noirq(struct device *dev)
genpd_sync_power_on(genpd, true, 0);
genpd_unlock(genpd);
- if (genpd->dev_ops.stop && genpd->dev_ops.start) {
- ret = pm_runtime_force_resume(dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
if (ret)
return ret;
}
@@ -2199,20 +2208,8 @@ int genpd_dev_pm_attach(struct device *dev)
ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells", 0, &pd_args);
- if (ret < 0) {
- if (ret != -ENOENT)
- return ret;
-
- /*
- * Try legacy Samsung-specific bindings
- * (for backwards compatibility of DT ABI)
- */
- pd_args.args_count = 0;
- pd_args.np = of_parse_phandle(dev->of_node,
- "samsung,power-domain", 0);
- if (!pd_args.np)
- return -ENOENT;
- }
+ if (ret < 0)
+ return ret;
mutex_lock(&gpd_list_lock);
pd = genpd_get_from_provider(&pd_args);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index db2f044..02a497e 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -18,7 +18,6 @@
*/
#include <linux/device.h>
-#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
@@ -526,6 +525,88 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
/*------------------------- Resume routines -------------------------*/
/**
+ * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
+ * @dev: Target device.
+ *
+ * Make the core skip the "early resume" and "resume" phases for @dev.
+ *
+ * This function can be called by middle-layer code during the "noirq" phase of
+ * system resume if necessary, but not by device drivers.
+ */
+void dev_pm_skip_next_resume_phases(struct device *dev)
+{
+ dev->power.is_late_suspended = false;
+ dev->power.is_suspended = false;
+}
+
+/**
+ * suspend_event - Return a "suspend" message for given "resume" one.
+ * @resume_msg: PM message representing a system-wide resume transition.
+ */
+static pm_message_t suspend_event(pm_message_t resume_msg)
+{
+ switch (resume_msg.event) {
+ case PM_EVENT_RESUME:
+ return PMSG_SUSPEND;
+ case PM_EVENT_THAW:
+ case PM_EVENT_RESTORE:
+ return PMSG_FREEZE;
+ case PM_EVENT_RECOVER:
+ return PMSG_HIBERNATE;
+ }
+ return PMSG_ON;
+}
+
+/**
+ * dev_pm_may_skip_resume - System-wide device resume optimization check.
+ * @dev: Target device.
+ *
+ * Checks whether or not the device may be left in suspend after a system-wide
+ * transition to the working state.
+ */
+bool dev_pm_may_skip_resume(struct device *dev)
+{
+ return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
+}
+
+static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
+static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p);
+
+static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p);
+
+/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
@@ -536,8 +617,9 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
*/
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
+ bool skip_resume;
int error = 0;
TRACE_DEVICE(dev);
@@ -551,29 +633,61 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
dpm_wait_for_superior(dev, async);
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
+ skip_resume = dev_pm_may_skip_resume(dev);
+
+ callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
+ if (callback)
+ goto Run;
+
+ if (skip_resume)
+ goto Skip;
+
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ pm_message_t suspend_msg = suspend_event(state);
+
+ /*
+ * If "freeze" callbacks have been skipped during a transition
+ * related to hibernation, the subsequent "thaw" callbacks must
+ * be skipped too or bad things may happen. Otherwise, resume
+ * callbacks are going to be run for the device, so its runtime
+ * PM status must be changed to reflect the new state after the
+ * transition under way.
+ */
+ if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
+ !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
+ if (state.event == PM_EVENT_THAW) {
+ skip_resume = true;
+ goto Skip;
+ } else {
+ pm_runtime_set_active(dev);
+ }
+ }
}
- if (!callback && dev->driver && dev->driver->pm) {
+ if (dev->driver && dev->driver->pm) {
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
+Run:
error = dpm_run_callback(callback, dev, state, info);
+
+Skip:
dev->power.is_noirq_suspended = false;
- Out:
+ if (skip_resume) {
+ /*
+ * The device is going to be left in suspend, but it might not
+ * have been in runtime suspend before the system suspended, so
+ * its runtime PM status needs to be updated to avoid confusing
+ * the runtime PM framework when runtime PM is enabled for the
+ * device again.
+ */
+ pm_runtime_set_suspended(dev);
+ dev_pm_skip_next_resume_phases(dev);
+ }
+
+Out:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
return error;
@@ -666,6 +780,35 @@ void dpm_resume_noirq(pm_message_t state)
dpm_noirq_end();
}
+static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "early power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "early type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "early class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "early bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
/**
* device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
@@ -676,8 +819,8 @@ void dpm_resume_noirq(pm_message_t state)
*/
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
int error = 0;
TRACE_DEVICE(dev);
@@ -691,19 +834,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
dpm_wait_for_superior(dev, async);
- if (dev->pm_domain) {
- info = "early power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "early type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "early class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "early bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- }
+ callback = dpm_subsys_resume_early_cb(dev, state, &info);
if (!callback && dev->driver && dev->driver->pm) {
info = "early driver ";
@@ -1074,6 +1205,77 @@ static pm_message_t resume_event(pm_message_t sleep_state)
return PMSG_ON;
}
+static void dpm_superior_set_must_resume(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ if (dev->parent)
+ dev->parent->power.must_resume = true;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ link->supplier->power.must_resume = true;
+
+ device_links_read_unlock(idx);
+}
+
+static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
+static bool device_must_resume(struct device *dev, pm_message_t state,
+ bool no_subsys_suspend_noirq)
+{
+ pm_message_t resume_msg = resume_event(state);
+
+ /*
+ * If all of the device driver's "noirq", "late" and "early" callbacks
+ * are invoked directly by the core, the decision to allow the device to
+ * stay in suspend can be based on its current runtime PM status and its
+ * wakeup settings.
+ */
+ if (no_subsys_suspend_noirq &&
+ !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
+ !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
+ !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
+ return !pm_runtime_status_suspended(dev) &&
+ (resume_msg.event != PM_EVENT_RESUME ||
+ (device_can_wakeup(dev) && !device_may_wakeup(dev)));
+
+ /*
+ * The only safe strategy here is to require that if the device may not
+ * be left in suspend, resume callbacks must be invoked for it.
+ */
+ return !dev->power.may_skip_resume;
+}
+
/**
* __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
@@ -1085,8 +1287,9 @@ static pm_message_t resume_event(pm_message_t sleep_state)
*/
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
+ bool no_subsys_cb = false;
int error = 0;
TRACE_DEVICE(dev);
@@ -1105,30 +1308,40 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
- }
+ callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
+ if (callback)
+ goto Run;
- if (!callback && dev->driver && dev->driver->pm) {
+ no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
+
+ if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
+Run:
error = dpm_run_callback(callback, dev, state, info);
- if (!error)
- dev->power.is_noirq_suspended = true;
- else
+ if (error) {
async_error = error;
+ goto Complete;
+ }
+
+Skip:
+ dev->power.is_noirq_suspended = true;
+
+ if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
+ dev->power.must_resume = dev->power.must_resume ||
+ atomic_read(&dev->power.usage_count) > 1 ||
+ device_must_resume(dev, state, no_subsys_cb);
+ } else {
+ dev->power.must_resume = true;
+ }
+
+ if (dev->power.must_resume)
+ dpm_superior_set_must_resume(dev);
Complete:
complete_all(&dev->power.completion);
@@ -1234,6 +1447,50 @@ int dpm_suspend_noirq(pm_message_t state)
return ret;
}
+static void dpm_propagate_wakeup_to_parent(struct device *dev)
+{
+ struct device *parent = dev->parent;
+
+ if (!parent)
+ return;
+
+ spin_lock_irq(&parent->power.lock);
+
+ if (dev->power.wakeup_path && !parent->power.ignore_children)
+ parent->power.wakeup_path = true;
+
+ spin_unlock_irq(&parent->power.lock);
+}
+
+static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "late power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "late type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "late class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "late bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
/**
* __device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
@@ -1244,8 +1501,8 @@ int dpm_suspend_noirq(pm_message_t state)
*/
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
int error = 0;
TRACE_DEVICE(dev);
@@ -1266,30 +1523,29 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- if (dev->pm_domain) {
- info = "late power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "late type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "late class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "late bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- }
+ callback = dpm_subsys_suspend_late_cb(dev, state, &info);
+ if (callback)
+ goto Run;
- if (!callback && dev->driver && dev->driver->pm) {
+ if (dev_pm_smart_suspend_and_suspended(dev) &&
+ !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
info = "late driver ";
callback = pm_late_early_op(dev->driver->pm, state);
}
+Run:
error = dpm_run_callback(callback, dev, state, info);
- if (!error)
- dev->power.is_late_suspended = true;
- else
+ if (error) {
async_error = error;
+ goto Complete;
+ }
+ dpm_propagate_wakeup_to_parent(dev);
+
+Skip:
+ dev->power.is_late_suspended = true;
Complete:
TRACE_SUSPEND(error);
@@ -1420,11 +1676,17 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
return error;
}
-static void dpm_clear_suppliers_direct_complete(struct device *dev)
+static void dpm_clear_superiors_direct_complete(struct device *dev)
{
struct device_link *link;
int idx;
+ if (dev->parent) {
+ spin_lock_irq(&dev->parent->power.lock);
+ dev->parent->power.direct_complete = false;
+ spin_unlock_irq(&dev->parent->power.lock);
+ }
+
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
@@ -1485,6 +1747,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dev->power.direct_complete = false;
}
+ dev->power.may_skip_resume = false;
+ dev->power.must_resume = false;
+
dpm_watchdog_set(&wd, dev);
device_lock(dev);
@@ -1528,20 +1793,12 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
End:
if (!error) {
- struct device *parent = dev->parent;
-
dev->power.is_suspended = true;
- if (parent) {
- spin_lock_irq(&parent->power.lock);
-
- dev->parent->power.direct_complete = false;
- if (dev->power.wakeup_path
- && !dev->parent->power.ignore_children)
- dev->parent->power.wakeup_path = true;
+ if (device_may_wakeup(dev))
+ dev->power.wakeup_path = true;
- spin_unlock_irq(&parent->power.lock);
- }
- dpm_clear_suppliers_direct_complete(dev);
+ dpm_propagate_wakeup_to_parent(dev);
+ dpm_clear_superiors_direct_complete(dev);
}
device_unlock(dev);
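
The End-block rewrite changes when wakeup_path is computed: instead of being preset in device_prepare() (see the hunk below, which now initializes it to false), it is set from device_may_wakeup() at suspend time and pushed upward through dpm_propagate_wakeup_to_parent(); dpm_clear_superiors_direct_complete() likewise clears direct_complete on the parent as well as on supplier links. Only devices that were made wakeup-capable participate, e.g. (sketch; foo_probe is hypothetical):

    static int foo_probe(struct platform_device *pdev)
    {
            /* Mark the device wakeup-capable and enable wakeup for it. */
            return device_init_wakeup(&pdev->dev, true);
    }
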
@@ -1650,8 +1907,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
if (dev->power.syscore)
return 0;
- WARN_ON(dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
- !pm_runtime_enabled(dev));
+ WARN_ON(!pm_runtime_enabled(dev) &&
+ dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_LEAVE_SUSPENDED));
/*
* If a device's parent goes into runtime suspend at the wrong time,
@@ -1663,7 +1921,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
device_lock(dev);
- dev->power.wakeup_path = device_may_wakeup(dev);
+ dev->power.wakeup_path = false;
if (dev->power.no_pm_callbacks) {
ret = 1; /* Let device go direct_complete */
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 7beee75..21244c5 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -41,20 +41,15 @@ extern void dev_pm_disable_wake_irq_check(struct device *dev);
#ifdef CONFIG_PM_SLEEP
-extern int device_wakeup_attach_irq(struct device *dev,
- struct wake_irq *wakeirq);
+extern void device_wakeup_attach_irq(struct device *dev, struct wake_irq *wakeirq);
extern void device_wakeup_detach_irq(struct device *dev);
extern void device_wakeup_arm_wake_irqs(void);
extern void device_wakeup_disarm_wake_irqs(void);
#else
-static inline int
-device_wakeup_attach_irq(struct device *dev,
- struct wake_irq *wakeirq)
-{
- return 0;
-}
+static inline void device_wakeup_attach_irq(struct device *dev,
+ struct wake_irq *wakeirq) {}
static inline void device_wakeup_detach_irq(struct device *dev)
{
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 6e89b51..8bef3cb 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1613,22 +1613,34 @@ void pm_runtime_drop_link(struct device *dev)
spin_unlock_irq(&dev->power.lock);
}
+static bool pm_runtime_need_not_resume(struct device *dev)
+{
+ return atomic_read(&dev->power.usage_count) <= 1 &&
+ (atomic_read(&dev->power.child_count) == 0 ||
+ dev->power.ignore_children);
+}
+
/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.
*
* Disable runtime PM so we safely can check the device's runtime PM status and
- * if it is active, invoke it's .runtime_suspend callback to bring it into
- * suspend state. Keep runtime PM disabled to preserve the state unless we
- * encounter errors.
+ * if it is active, invoke its ->runtime_suspend callback to suspend it and
+ * change its runtime PM status field to RPM_SUSPENDED. Also, if the device's
+ * usage and children counters don't indicate that the device was in use before
+ * the system-wide transition under way, decrement its parent's children counter
+ * (if there is a parent). Keep runtime PM disabled to preserve the state
+ * unless we encounter errors.
*
* Typically this function may be invoked from a system suspend callback to make
- * sure the device is put into low power state.
+ * sure the device is put into low power state and it should only be used during
+ * system-wide PM transitions to sleep states. It assumes that the analogous
+ * pm_runtime_force_resume() will be used to resume the device.
*/
int pm_runtime_force_suspend(struct device *dev)
{
int (*callback)(struct device *);
- int ret = 0;
+ int ret;
pm_runtime_disable(dev);
if (pm_runtime_status_suspended(dev))
@@ -1636,27 +1648,23 @@ int pm_runtime_force_suspend(struct device *dev)
callback = RPM_GET_CALLBACK(dev, runtime_suspend);
- if (!callback) {
- ret = -ENOSYS;
- goto err;
- }
-
- ret = callback(dev);
+ ret = callback ? callback(dev) : 0;
if (ret)
goto err;
/*
- * Increase the runtime PM usage count for the device's parent, in case
- * when we find the device being used when system suspend was invoked.
- * This informs pm_runtime_force_resume() to resume the parent
- * immediately, which is needed to be able to resume its children,
- * when not deferring the resume to be managed via runtime PM.
+ * If the device can stay in suspend after the system-wide transition
+ * to the working state that will follow, drop the children counter of
+ * its parent, but set its status to RPM_SUSPENDED anyway in case this
+ * function will be called again for it in the meantime.
*/
- if (dev->parent && atomic_read(&dev->power.usage_count) > 1)
- pm_runtime_get_noresume(dev->parent);
+ if (pm_runtime_need_not_resume(dev))
+ pm_runtime_set_suspended(dev);
+ else
+ __update_runtime_status(dev, RPM_SUSPENDED);
- pm_runtime_set_suspended(dev);
return 0;
+
err:
pm_runtime_enable(dev);
return ret;
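
pm_runtime_force_suspend() and pm_runtime_force_resume() now share one definition of "nobody needs this device awake": pm_runtime_need_not_resume() checks that the usage count is at most one (the reference the PM core takes in the prepare phase) and that there are no un-ignored active children. That replaces the old trick of bumping the parent's usage count during suspend. The canonical pairing the updated kernel-doc describes is to use the two helpers directly as the system sleep callbacks (sketch; the foo_* runtime callbacks are hypothetical):

    static const struct dev_pm_ops foo_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                    pm_runtime_force_resume)
            SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };
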
@@ -1669,13 +1677,9 @@ EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
*
* Prior invoking this function we expect the user to have brought the device
* into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and brings the device into full power, if it is expected to be
- * used on system resume. To distinguish that, we check whether the runtime PM
- * usage count is greater than 1 (the PM core increases the usage count in the
- * system PM prepare phase), as that indicates a real user (such as a subsystem,
- * driver, userspace, etc.) is using it. If that is the case, the device is
- * expected to be used on system resume as well, so then we resume it. In the
- * other case, we defer the resume to be managed via runtime PM.
+ * those actions and bring the device into full power, if it is expected to be
+ * used on system resume. In the other case, we defer the resume to be managed
+ * via runtime PM.
*
* Typically this function may be invoked from a system resume callback.
*/
@@ -1684,32 +1688,18 @@ int pm_runtime_force_resume(struct device *dev)
int (*callback)(struct device *);
int ret = 0;
- callback = RPM_GET_CALLBACK(dev, runtime_resume);
-
- if (!callback) {
- ret = -ENOSYS;
- goto out;
- }
-
- if (!pm_runtime_status_suspended(dev))
+ if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
goto out;
/*
- * Decrease the parent's runtime PM usage count, if we increased it
- * during system suspend in pm_runtime_force_suspend().
- */
- if (atomic_read(&dev->power.usage_count) > 1) {
- if (dev->parent)
- pm_runtime_put_noidle(dev->parent);
- } else {
- goto out;
- }
+ * The value of the parent's children counter is correct already, so
+ * just update the status of the device.
+ */
+ __update_runtime_status(dev, RPM_ACTIVE);
- ret = pm_runtime_set_active(dev);
- if (ret)
- goto out;
+ callback = RPM_GET_CALLBACK(dev, runtime_resume);
- ret = callback(dev);
+ ret = callback ? callback(dev) : 0;
if (ret) {
pm_runtime_set_suspended(dev);
goto out;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index e153e28..0f651ef 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -108,16 +108,10 @@ static ssize_t control_show(struct device *dev, struct device_attribute *attr,
static ssize_t control_store(struct device * dev, struct device_attribute *attr,
const char * buf, size_t n)
{
- char *cp;
- int len = n;
-
- cp = memchr(buf, '\n', n);
- if (cp)
- len = cp - buf;
device_lock(dev);
- if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
+ if (sysfs_streq(buf, ctrl_auto))
pm_runtime_allow(dev);
- else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
+ else if (sysfs_streq(buf, ctrl_on))
pm_runtime_forbid(dev);
else
n = -EINVAL;
@@ -125,9 +119,9 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
return n;
}
-static DEVICE_ATTR(control, 0644, control_show, control_store);
+static DEVICE_ATTR_RW(control);
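
sysfs_streq() replaces the open-coded memchr()/strncmp() dance here and in the stores below: it compares two strings while treating a single trailing newline as equivalent to the terminator, which matches what userspace writes through sysfs ("echo auto" sends "auto\n"). Its logic, paraphrased from lib/string.c:

    /* Paraphrase of sysfs_streq(); the real helper lives in lib/string.c. */
    static bool my_sysfs_streq(const char *s1, const char *s2)
    {
            while (*s1 && *s1 == *s2) {
                    s1++;
                    s2++;
            }

            if (*s1 == *s2)
                    return true;
            if (!*s1 && *s2 == '\n' && !s2[1])
                    return true;
            if (*s1 == '\n' && !s1[1] && !*s2)
                    return true;
            return false;
    }

So sysfs_streq("auto\n", "auto") and sysfs_streq("auto", "auto") both hold, while "autox" matches neither.
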
-static ssize_t rtpm_active_time_show(struct device *dev,
+static ssize_t runtime_active_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
@@ -138,9 +132,9 @@ static ssize_t rtpm_active_time_show(struct device *dev,
return ret;
}
-static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL);
+static DEVICE_ATTR_RO(runtime_active_time);
-static ssize_t rtpm_suspended_time_show(struct device *dev,
+static ssize_t runtime_suspended_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
@@ -152,9 +146,9 @@ static ssize_t rtpm_suspended_time_show(struct device *dev,
return ret;
}
-static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL);
+static DEVICE_ATTR_RO(runtime_suspended_time);
-static ssize_t rtpm_status_show(struct device *dev,
+static ssize_t runtime_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const char *p;
@@ -184,7 +178,7 @@ static ssize_t rtpm_status_show(struct device *dev,
return sprintf(buf, p);
}
-static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
+static DEVICE_ATTR_RO(runtime_status);
static ssize_t autosuspend_delay_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -211,26 +205,25 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
return n;
}
-static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
- autosuspend_delay_ms_store);
+static DEVICE_ATTR_RW(autosuspend_delay_ms);
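
The rtpm_*/wake_* helpers get renamed throughout this file because DEVICE_ATTR_RO()/DEVICE_ATTR_RW() derive the callback names from the attribute name: DEVICE_ATTR_RW(foo) expands roughly to __ATTR(foo, 0644, foo_show, foo_store). A minimal sketch of the convention (the attribute foo is hypothetical):

    static ssize_t foo_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "%d\n", 42);
    }

    static ssize_t foo_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t n)
    {
            return n;       /* accept and ignore the write */
    }
    static DEVICE_ATTR_RW(foo);
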
-static ssize_t pm_qos_resume_latency_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s32 value = dev_pm_qos_requested_resume_latency(dev);
if (value == 0)
return sprintf(buf, "n/a\n");
- else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
value = 0;
return sprintf(buf, "%d\n", value);
}
-static ssize_t pm_qos_resume_latency_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
+static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
{
s32 value;
int ret;
@@ -245,7 +238,7 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
if (value == 0)
value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
- } else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) {
+ } else if (sysfs_streq(buf, "n/a")) {
value = 0;
} else {
return -EINVAL;
@@ -256,26 +249,25 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
return ret < 0 ? ret : n;
}
-static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
- pm_qos_resume_latency_show, pm_qos_resume_latency_store);
+static DEVICE_ATTR_RW(pm_qos_resume_latency_us);
-static ssize_t pm_qos_latency_tolerance_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
if (value < 0)
return sprintf(buf, "auto\n");
- else if (value == PM_QOS_LATENCY_ANY)
+ if (value == PM_QOS_LATENCY_ANY)
return sprintf(buf, "any\n");
return sprintf(buf, "%d\n", value);
}
-static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
+static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
{
s32 value;
int ret;
@@ -285,9 +277,9 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
if (value < 0)
return -EINVAL;
} else {
- if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
+ if (sysfs_streq(buf, "auto"))
value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
- else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
+ else if (sysfs_streq(buf, "any"))
value = PM_QOS_LATENCY_ANY;
else
return -EINVAL;
@@ -296,8 +288,7 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
return ret < 0 ? ret : n;
}
-static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644,
- pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store);
+static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us);
static ssize_t pm_qos_no_power_off_show(struct device *dev,
struct device_attribute *attr,
@@ -323,49 +314,39 @@ static ssize_t pm_qos_no_power_off_store(struct device *dev,
return ret < 0 ? ret : n;
}
-static DEVICE_ATTR(pm_qos_no_power_off, 0644,
- pm_qos_no_power_off_show, pm_qos_no_power_off_store);
+static DEVICE_ATTR_RW(pm_qos_no_power_off);
#ifdef CONFIG_PM_SLEEP
static const char _enabled[] = "enabled";
static const char _disabled[] = "disabled";
-static ssize_t
-wake_show(struct device * dev, struct device_attribute *attr, char * buf)
+static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%s\n", device_can_wakeup(dev)
? (device_may_wakeup(dev) ? _enabled : _disabled)
: "");
}
-static ssize_t
-wake_store(struct device * dev, struct device_attribute *attr,
- const char * buf, size_t n)
+static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
{
- char *cp;
- int len = n;
-
if (!device_can_wakeup(dev))
return -EINVAL;
- cp = memchr(buf, '\n', n);
- if (cp)
- len = cp - buf;
- if (len == sizeof _enabled - 1
- && strncmp(buf, _enabled, sizeof _enabled - 1) == 0)
+ if (sysfs_streq(buf, _enabled))
device_set_wakeup_enable(dev, 1);
- else if (len == sizeof _disabled - 1
- && strncmp(buf, _disabled, sizeof _disabled - 1) == 0)
+ else if (sysfs_streq(buf, _disabled))
device_set_wakeup_enable(dev, 0);
else
return -EINVAL;
return n;
}
-static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
+static DEVICE_ATTR_RW(wakeup);
static ssize_t wakeup_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
unsigned long count = 0;
bool enabled = false;
@@ -379,10 +360,11 @@ static ssize_t wakeup_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_count);
static ssize_t wakeup_active_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
unsigned long count = 0;
bool enabled = false;
@@ -396,11 +378,11 @@ static ssize_t wakeup_active_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_active_count);
static ssize_t wakeup_abort_count_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
unsigned long count = 0;
bool enabled = false;
@@ -414,7 +396,7 @@ static ssize_t wakeup_abort_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_abort_count, 0444, wakeup_abort_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_abort_count);
static ssize_t wakeup_expire_count_show(struct device *dev,
struct device_attribute *attr,
@@ -432,10 +414,10 @@ static ssize_t wakeup_expire_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_expire_count, 0444, wakeup_expire_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_expire_count);
static ssize_t wakeup_active_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
unsigned int active = 0;
bool enabled = false;
@@ -449,10 +431,11 @@ static ssize_t wakeup_active_show(struct device *dev,
return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_active, 0444, wakeup_active_show, NULL);
+static DEVICE_ATTR_RO(wakeup_active);
-static ssize_t wakeup_total_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t wakeup_total_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -466,10 +449,10 @@ static ssize_t wakeup_total_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_total_time_ms, 0444, wakeup_total_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_total_time_ms);
-static ssize_t wakeup_max_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t wakeup_max_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -483,10 +466,11 @@ static ssize_t wakeup_max_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_max_time_ms, 0444, wakeup_max_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_max_time_ms);
-static ssize_t wakeup_last_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t wakeup_last_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -500,12 +484,12 @@ static ssize_t wakeup_last_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_last_time_ms);
#ifdef CONFIG_PM_AUTOSLEEP
-static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -519,40 +503,39 @@ static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_prevent_sleep_time_ms, 0444,
- wakeup_prevent_sleep_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_ADVANCED_DEBUG
-static ssize_t rtpm_usagecount_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t runtime_usage_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
}
+static DEVICE_ATTR_RO(runtime_usage);
-static ssize_t rtpm_children_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t runtime_active_kids_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", dev->power.ignore_children ?
0 : atomic_read(&dev->power.child_count));
}
+static DEVICE_ATTR_RO(runtime_active_kids);
-static ssize_t rtpm_enabled_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t runtime_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- if ((dev->power.disable_depth) && (dev->power.runtime_auto == false))
+ if (dev->power.disable_depth && (dev->power.runtime_auto == false))
return sprintf(buf, "disabled & forbidden\n");
- else if (dev->power.disable_depth)
+ if (dev->power.disable_depth)
return sprintf(buf, "disabled\n");
- else if (dev->power.runtime_auto == false)
+ if (dev->power.runtime_auto == false)
return sprintf(buf, "forbidden\n");
return sprintf(buf, "enabled\n");
}
-
-static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
-static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
-static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
+static DEVICE_ATTR_RO(runtime_enabled);
#ifdef CONFIG_PM_SLEEP
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
@@ -566,23 +549,16 @@ static ssize_t async_show(struct device *dev, struct device_attribute *attr,
static ssize_t async_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
- char *cp;
- int len = n;
-
- cp = memchr(buf, '\n', n);
- if (cp)
- len = cp - buf;
- if (len == sizeof _enabled - 1 && strncmp(buf, _enabled, len) == 0)
+ if (sysfs_streq(buf, _enabled))
device_enable_async_suspend(dev);
- else if (len == sizeof _disabled - 1 &&
- strncmp(buf, _disabled, len) == 0)
+ else if (sysfs_streq(buf, _disabled))
device_disable_async_suspend(dev);
else
return -EINVAL;
return n;
}
-static DEVICE_ATTR(async, 0644, async_show, async_store);
+static DEVICE_ATTR_RW(async);
#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM_ADVANCED_DEBUG */
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index ae04298..a8ac86e 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -33,7 +33,6 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq,
struct wake_irq *wirq)
{
unsigned long flags;
- int err;
if (!dev || !wirq)
return -EINVAL;
@@ -45,12 +44,11 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq,
return -EEXIST;
}
- err = device_wakeup_attach_irq(dev, wirq);
- if (!err)
- dev->power.wakeirq = wirq;
+ dev->power.wakeirq = wirq;
+ device_wakeup_attach_irq(dev, wirq);
spin_unlock_irqrestore(&dev->power.lock, flags);
- return err;
+ return 0;
}
/**
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 38559f0..ea01621 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -19,6 +19,11 @@
#include "power.h"
+#ifndef CONFIG_SUSPEND
+suspend_state_t pm_suspend_target_state;
+#define pm_suspend_target_state (PM_SUSPEND_ON)
+#endif
+
/*
* If set, the suspend/hibernate code will abort transitions to a sleep state
* if wakeup events are registered during or immediately before the transition.
@@ -268,6 +273,9 @@ int device_wakeup_enable(struct device *dev)
if (!dev || !dev->power.can_wakeup)
return -EINVAL;
+ if (pm_suspend_target_state != PM_SUSPEND_ON)
+ dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
+
ws = wakeup_source_register(dev_name(dev));
if (!ws)
return -ENOMEM;
@@ -291,22 +299,19 @@ EXPORT_SYMBOL_GPL(device_wakeup_enable);
*
* Call under the device's power.lock lock.
*/
-int device_wakeup_attach_irq(struct device *dev,
+void device_wakeup_attach_irq(struct device *dev,
struct wake_irq *wakeirq)
{
struct wakeup_source *ws;
ws = dev->power.wakeup;
- if (!ws) {
- dev_err(dev, "forgot to call call device_init_wakeup?\n");
- return -EINVAL;
- }
+ if (!ws)
+ return;
if (ws->wakeirq)
- return -EEXIST;
+ dev_err(dev, "Leftover wakeup IRQ found, overriding\n");
ws->wakeirq = wakeirq;
- return 0;
}
/**
@@ -448,9 +453,7 @@ int device_init_wakeup(struct device *dev, bool enable)
device_set_wakeup_capable(dev, true);
ret = device_wakeup_enable(dev);
} else {
- if (dev->power.can_wakeup)
- device_wakeup_disable(dev);
-
+ device_wakeup_disable(dev);
device_set_wakeup_capable(dev, false);
}
@@ -464,9 +467,6 @@ EXPORT_SYMBOL_GPL(device_init_wakeup);
*/
int device_set_wakeup_enable(struct device *dev, bool enable)
{
- if (!dev || !dev->power.can_wakeup)
- return -EINVAL;
-
return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 3a1535d..067073e 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -6,7 +6,6 @@
config REGMAP
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
select IRQ_DOMAIN if REGMAP_IRQ
- select REGMAP_HWSPINLOCK if HWSPINLOCK=y
bool
config REGCACHE_COMPRESSED
@@ -39,5 +38,6 @@ config REGMAP_MMIO
config REGMAP_IRQ
bool
-config REGMAP_HWSPINLOCK
- bool
+config REGMAP_SOUNDWIRE
+ tristate
+ depends on SOUNDWIRE_BUS
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 0d298c4..22d263c 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
obj-$(CONFIG_REGMAP_W1) += regmap-w1.o
+obj-$(CONFIG_REGMAP_SOUNDWIRE) += regmap-sdw.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 8641183..53785e0 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -77,6 +77,7 @@ struct regmap {
int async_ret;
#ifdef CONFIG_DEBUG_FS
+ bool debugfs_disable;
struct dentry *debugfs;
const char *debugfs_name;
@@ -215,10 +216,17 @@ struct regmap_field {
extern void regmap_debugfs_initcall(void);
extern void regmap_debugfs_init(struct regmap *map, const char *name);
extern void regmap_debugfs_exit(struct regmap *map);
+
+static inline void regmap_debugfs_disable(struct regmap *map)
+{
+ map->debugfs_disable = true;
+}
+
#else
static inline void regmap_debugfs_initcall(void) { }
static inline void regmap_debugfs_init(struct regmap *map, const char *name) { }
static inline void regmap_debugfs_exit(struct regmap *map) { }
+static inline void regmap_debugfs_disable(struct regmap *map) { }
#endif
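
regmap_debugfs_disable() gives the core a way to veto debugfs for a map whose owner turned internal locking off (see the regmap.c hunk below): a userspace-initiated debugfs read would otherwise race the driver on the shared buffers. Disabling the locking is a regmap_config decision; a sketch (register widths hypothetical):

    static const struct regmap_config foo_config = {
            .reg_bits = 8,
            .val_bits = 8,
            /* The caller guarantees that all accesses are serialized. */
            .disable_locking = true,
    };
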
/* regcache core declarations */
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index 4d2e50b..bc6cd88 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -37,9 +37,12 @@ static int regcache_flat_init(struct regmap *map)
cache = map->cache;
- for (i = 0; i < map->num_reg_defaults; i++)
- cache[regcache_flat_get_index(map, map->reg_defaults[i].reg)] =
- map->reg_defaults[i].def;
+ for (i = 0; i < map->num_reg_defaults; i++) {
+ unsigned int reg = map->reg_defaults[i].reg;
+ unsigned int index = regcache_flat_get_index(map, reg);
+
+ cache[index] = map->reg_defaults[i].def;
+ }
return 0;
}
@@ -56,8 +59,9 @@ static int regcache_flat_read(struct regmap *map,
unsigned int reg, unsigned int *value)
{
unsigned int *cache = map->cache;
+ unsigned int index = regcache_flat_get_index(map, reg);
- *value = cache[regcache_flat_get_index(map, reg)];
+ *value = cache[index];
return 0;
}
@@ -66,8 +70,9 @@ static int regcache_flat_write(struct regmap *map, unsigned int reg,
unsigned int value)
{
unsigned int *cache = map->cache;
+ unsigned int index = regcache_flat_get_index(map, reg);
- cache[regcache_flat_get_index(map, reg)] = value;
+ cache[index] = value;
return 0;
}
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 36ce351..f326633 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -529,6 +529,18 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
struct regmap_range_node *range_node;
const char *devname = "dummy";
+ /*
+ * Userspace can initiate reads from the hardware over debugfs.
+ * Normally internal regmap structures and buffers are protected with
+ * a mutex or a spinlock, but if the regmap owner decided to disable
+ * all locking mechanisms, this is no longer the case. For safety:
+ * don't create the debugfs entries if locking is disabled.
+ */
+ if (map->debugfs_disable) {
+ dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
+ return;
+ }
+
/* If we don't have the debugfs root yet, postpone init */
if (!regmap_debugfs_root) {
struct regmap_debugfs_node *node;
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
new file mode 100644
index 0000000..50a6638
--- /dev/null
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2015-17 Intel Corporation.
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/soundwire/sdw.h>
+#include "internal.h"
+
+static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct device *dev = context;
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+
+ return sdw_write(slave, reg, val);
+}
+
+static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct device *dev = context;
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ int read;
+
+ read = sdw_read(slave, reg);
+ if (read < 0)
+ return read;
+
+ *val = read;
+ return 0;
+}
+
+static struct regmap_bus regmap_sdw = {
+ .reg_read = regmap_sdw_read,
+ .reg_write = regmap_sdw_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static int regmap_sdw_config_check(const struct regmap_config *config)
+{
+	/* All register values are 8 bits wide as per the MIPI SoundWire 1.0 spec */
+ if (config->val_bits != 8)
+ return -ENOTSUPP;
+
+	/* Register addresses are 32 bits wide */
+ if (config->reg_bits != 32)
+ return -ENOTSUPP;
+
+ if (config->pad_bits != 0)
+ return -ENOTSUPP;
+
+ return 0;
+}
+
+struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ int ret;
+
+ ret = regmap_sdw_config_check(config);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return __regmap_init(&sdw->dev, &regmap_sdw,
+ &sdw->dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_sdw);
+
+struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ int ret;
+
+ ret = regmap_sdw_config_check(config);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return __devm_regmap_init(&sdw->dev, &regmap_sdw,
+ &sdw->dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw);
+
+MODULE_DESCRIPTION("Regmap SoundWire Module");
+MODULE_LICENSE("GPL v2");
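
The new bus implements only per-register reg_read()/reg_write() hooks and rejects any layout other than 32-bit register addresses with 8-bit values, matching the check in regmap_sdw_config_check(). A hypothetical SoundWire codec driver would reach it through the devm_regmap_init_sdw() wrapper declared in include/linux/regmap.h (sketch; the foo_* names and the max_register value are made up):

    static const struct regmap_config foo_sdw_regmap = {
            .reg_bits = 32,         /* SoundWire register addresses */
            .val_bits = 8,          /* SoundWire register values */
            .max_register = 0x8000,
    };

    static int foo_sdw_probe(struct sdw_slave *slave,
                             const struct sdw_device_id *id)
    {
            struct regmap *regmap;

            regmap = devm_regmap_init_sdw(slave, &foo_sdw_regmap);
            if (IS_ERR(regmap))
                    return PTR_ERR(regmap);

            return 0;
    }
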
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 8d516a9..ee302cc 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -414,7 +414,6 @@ static unsigned int regmap_parse_64_native(const void *buf)
}
#endif
-#ifdef REGMAP_HWSPINLOCK
static void regmap_lock_hwlock(void *__map)
{
struct regmap *map = __map;
@@ -457,7 +456,11 @@ static void regmap_unlock_hwlock_irqrestore(void *__map)
hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}
-#endif
+
+static void regmap_lock_unlock_none(void *__map)
+{
+
+}
static void regmap_lock_mutex(void *__map)
{
@@ -669,16 +672,26 @@ struct regmap *__regmap_init(struct device *dev,
goto err;
}
- if (config->lock && config->unlock) {
+ if (config->name) {
+ map->name = kstrdup_const(config->name, GFP_KERNEL);
+ if (!map->name) {
+ ret = -ENOMEM;
+ goto err_map;
+ }
+ }
+
+ if (config->disable_locking) {
+ map->lock = map->unlock = regmap_lock_unlock_none;
+ regmap_debugfs_disable(map);
+ } else if (config->lock && config->unlock) {
map->lock = config->lock;
map->unlock = config->unlock;
map->lock_arg = config->lock_arg;
- } else if (config->hwlock_id) {
-#ifdef REGMAP_HWSPINLOCK
+ } else if (config->use_hwlock) {
map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
if (!map->hwlock) {
ret = -ENXIO;
- goto err_map;
+ goto err_name;
}
switch (config->hwlock_mode) {
@@ -697,10 +710,6 @@ struct regmap *__regmap_init(struct device *dev,
}
map->lock_arg = map;
-#else
- ret = -EINVAL;
- goto err_map;
-#endif
} else {
if ((bus && bus->fast_io) ||
config->fast_io) {
@@ -762,14 +771,15 @@ struct regmap *__regmap_init(struct device *dev,
map->volatile_reg = config->volatile_reg;
map->precious_reg = config->precious_reg;
map->cache_type = config->cache_type;
- map->name = config->name;
spin_lock_init(&map->async_lock);
INIT_LIST_HEAD(&map->async_list);
INIT_LIST_HEAD(&map->async_free);
init_waitqueue_head(&map->async_waitq);
- if (config->read_flag_mask || config->write_flag_mask) {
+ if (config->read_flag_mask ||
+ config->write_flag_mask ||
+ config->zero_flag_mask) {
map->read_flag_mask = config->read_flag_mask;
map->write_flag_mask = config->write_flag_mask;
} else if (bus) {
@@ -1116,8 +1126,10 @@ err_range:
regmap_range_exit(map);
kfree(map->work_buf);
err_hwlock:
- if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock)
+ if (map->hwlock)
hwspin_lock_free(map->hwlock);
+err_name:
+ kfree_const(map->name);
err_map:
kfree(map);
err:
@@ -1305,8 +1317,9 @@ void regmap_exit(struct regmap *map)
kfree(async->work_buf);
kfree(async);
}
- if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock)
+ if (map->hwlock)
hwspin_lock_free(map->hwlock);
+ kfree_const(map->name);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
@@ -2423,13 +2436,15 @@ static int _regmap_bus_read(void *context, unsigned int reg,
{
int ret;
struct regmap *map = context;
+ void *work_val = map->work_buf + map->format.reg_bytes +
+ map->format.pad_bytes;
if (!map->format.parse_val)
return -EINVAL;
- ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
+ ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
if (ret == 0)
- *val = map->format.parse_val(map->work_buf);
+ *val = map->format.parse_val(work_val);
return ret;
}
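
The _regmap_bus_read() change fixes where a single-register read lands in regmap's scratch buffer: parsing the value at offset 0 would clobber the formatted register address still sitting at the front of work_buf. The buffer layout the fix respects, as a sketch:

    /*
     * work_buf layout:
     *
     *   [ reg_bytes | pad_bytes | val_bytes ... ]
     *     ^ formatted address     ^ value parsed from here
     */
    void *work_val = map->work_buf + map->format.reg_bytes +
                     map->format.pad_bytes;
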
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 02d78f6..ba8acca 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -55,7 +55,7 @@ config BCMA_DRIVER_PCI
config BCMA_DRIVER_PCI_HOSTMODE
bool "Driver for PCI core working in hostmode"
- depends on MIPS && BCMA_DRIVER_PCI
+ depends on MIPS && BCMA_DRIVER_PCI && PCI_DRIVERS_LEGACY
help
PCI core hostmode operation (external PCI bus).
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 442e777..7280752 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -6619,43 +6619,27 @@ static void DAC960_DestroyProcEntries(DAC960_Controller_T *Controller)
#ifdef DAC960_GAM_MINOR
-/*
- * DAC960_gam_ioctl is the ioctl function for performing RAID operations.
-*/
-
-static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
- unsigned long Argument)
+static long DAC960_gam_get_controller_info(DAC960_ControllerInfo_T __user *UserSpaceControllerInfo)
{
- long ErrorCode = 0;
- if (!capable(CAP_SYS_ADMIN)) return -EACCES;
-
- mutex_lock(&DAC960_mutex);
- switch (Request)
- {
- case DAC960_IOCTL_GET_CONTROLLER_COUNT:
- ErrorCode = DAC960_ControllerCount;
- break;
- case DAC960_IOCTL_GET_CONTROLLER_INFO:
- {
- DAC960_ControllerInfo_T __user *UserSpaceControllerInfo =
- (DAC960_ControllerInfo_T __user *) Argument;
DAC960_ControllerInfo_T ControllerInfo;
DAC960_Controller_T *Controller;
int ControllerNumber;
+ long ErrorCode;
+
if (UserSpaceControllerInfo == NULL)
ErrorCode = -EINVAL;
else ErrorCode = get_user(ControllerNumber,
&UserSpaceControllerInfo->ControllerNumber);
if (ErrorCode != 0)
- break;
+ goto out;
ErrorCode = -ENXIO;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1) {
- break;
+ goto out;
}
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
memset(&ControllerInfo, 0, sizeof(DAC960_ControllerInfo_T));
ControllerInfo.ControllerNumber = ControllerNumber;
ControllerInfo.FirmwareType = Controller->FirmwareType;
@@ -6670,12 +6654,12 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
strcpy(ControllerInfo.FirmwareVersion, Controller->FirmwareVersion);
ErrorCode = (copy_to_user(UserSpaceControllerInfo, &ControllerInfo,
sizeof(DAC960_ControllerInfo_T)) ? -EFAULT : 0);
- break;
- }
- case DAC960_IOCTL_V1_EXECUTE_COMMAND:
- {
- DAC960_V1_UserCommand_T __user *UserSpaceUserCommand =
- (DAC960_V1_UserCommand_T __user *) Argument;
+out:
+ return ErrorCode;
+}
+
+static long DAC960_gam_v1_execute_command(DAC960_V1_UserCommand_T __user *UserSpaceUserCommand)
+{
DAC960_V1_UserCommand_T UserCommand;
DAC960_Controller_T *Controller;
DAC960_Command_T *Command = NULL;
@@ -6688,39 +6672,41 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
int ControllerNumber, DataTransferLength;
unsigned char *DataTransferBuffer = NULL;
dma_addr_t DataTransferBufferDMA;
+ long ErrorCode;
+
if (UserSpaceUserCommand == NULL) {
ErrorCode = -EINVAL;
- break;
+ goto out;
}
if (copy_from_user(&UserCommand, UserSpaceUserCommand,
sizeof(DAC960_V1_UserCommand_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ControllerNumber = UserCommand.ControllerNumber;
ErrorCode = -ENXIO;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1)
- break;
+ goto out;
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
ErrorCode = -EINVAL;
if (Controller->FirmwareType != DAC960_V1_Controller)
- break;
+ goto out;
CommandOpcode = UserCommand.CommandMailbox.Common.CommandOpcode;
DataTransferLength = UserCommand.DataTransferLength;
if (CommandOpcode & 0x80)
- break;
+ goto out;
if (CommandOpcode == DAC960_V1_DCDB)
{
if (copy_from_user(&DCDB, UserCommand.DCDB,
sizeof(DAC960_V1_DCDB_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
if (DCDB.Channel >= DAC960_V1_MaxChannels)
- break;
+ goto out;
if (!((DataTransferLength == 0 &&
DCDB.Direction
== DAC960_V1_DCDB_NoDataTransfer) ||
@@ -6730,15 +6716,15 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
(DataTransferLength < 0 &&
DCDB.Direction
== DAC960_V1_DCDB_DataTransferSystemToDevice)))
- break;
+ goto out;
if (((DCDB.TransferLengthHigh4 << 16) | DCDB.TransferLength)
!= abs(DataTransferLength))
- break;
+ goto out;
DCDB_IOBUF = pci_alloc_consistent(Controller->PCIDevice,
sizeof(DAC960_V1_DCDB_T), &DCDB_IOBUFDMA);
if (DCDB_IOBUF == NULL) {
ErrorCode = -ENOMEM;
- break;
+ goto out;
}
}
ErrorCode = -ENOMEM;
@@ -6748,19 +6734,19 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
DataTransferLength,
&DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
}
else if (DataTransferLength < 0)
{
DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
-DataTransferLength, &DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
if (copy_from_user(DataTransferBuffer,
UserCommand.DataTransferBuffer,
-DataTransferLength)) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
}
if (CommandOpcode == DAC960_V1_DCDB)
@@ -6837,12 +6823,12 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
if (DCDB_IOBUF != NULL)
pci_free_consistent(Controller->PCIDevice, sizeof(DAC960_V1_DCDB_T),
DCDB_IOBUF, DCDB_IOBUFDMA);
- break;
- }
- case DAC960_IOCTL_V2_EXECUTE_COMMAND:
- {
- DAC960_V2_UserCommand_T __user *UserSpaceUserCommand =
- (DAC960_V2_UserCommand_T __user *) Argument;
+ out:
+ return ErrorCode;
+}
+
+static long DAC960_gam_v2_execute_command(DAC960_V2_UserCommand_T __user *UserSpaceUserCommand)
+{
DAC960_V2_UserCommand_T UserCommand;
DAC960_Controller_T *Controller;
DAC960_Command_T *Command = NULL;
@@ -6855,26 +6841,26 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
dma_addr_t DataTransferBufferDMA;
unsigned char *RequestSenseBuffer = NULL;
dma_addr_t RequestSenseBufferDMA;
+ long ErrorCode = -EINVAL;
- ErrorCode = -EINVAL;
if (UserSpaceUserCommand == NULL)
- break;
+ goto out;
if (copy_from_user(&UserCommand, UserSpaceUserCommand,
sizeof(DAC960_V2_UserCommand_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ErrorCode = -ENXIO;
ControllerNumber = UserCommand.ControllerNumber;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1)
- break;
+ goto out;
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
if (Controller->FirmwareType != DAC960_V2_Controller){
ErrorCode = -EINVAL;
- break;
+ goto out;
}
DataTransferLength = UserCommand.DataTransferLength;
ErrorCode = -ENOMEM;
@@ -6884,14 +6870,14 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
DataTransferLength,
&DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
}
else if (DataTransferLength < 0)
{
DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
-DataTransferLength, &DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
if (copy_from_user(DataTransferBuffer,
UserCommand.DataTransferBuffer,
-DataTransferLength)) {
@@ -7001,42 +6987,44 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
if (RequestSenseBuffer != NULL)
pci_free_consistent(Controller->PCIDevice, RequestSenseLength,
RequestSenseBuffer, RequestSenseBufferDMA);
- break;
- }
- case DAC960_IOCTL_V2_GET_HEALTH_STATUS:
- {
- DAC960_V2_GetHealthStatus_T __user *UserSpaceGetHealthStatus =
- (DAC960_V2_GetHealthStatus_T __user *) Argument;
+out:
+ return ErrorCode;
+}
+
+static long DAC960_gam_v2_get_health_status(DAC960_V2_GetHealthStatus_T __user *UserSpaceGetHealthStatus)
+{
DAC960_V2_GetHealthStatus_T GetHealthStatus;
DAC960_V2_HealthStatusBuffer_T HealthStatusBuffer;
DAC960_Controller_T *Controller;
int ControllerNumber;
+ long ErrorCode;
+
if (UserSpaceGetHealthStatus == NULL) {
ErrorCode = -EINVAL;
- break;
+ goto out;
}
if (copy_from_user(&GetHealthStatus, UserSpaceGetHealthStatus,
sizeof(DAC960_V2_GetHealthStatus_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ErrorCode = -ENXIO;
ControllerNumber = GetHealthStatus.ControllerNumber;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1)
- break;
+ goto out;
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
if (Controller->FirmwareType != DAC960_V2_Controller) {
ErrorCode = -EINVAL;
- break;
+ goto out;
}
if (copy_from_user(&HealthStatusBuffer,
GetHealthStatus.HealthStatusBuffer,
sizeof(DAC960_V2_HealthStatusBuffer_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ErrorCode = wait_event_interruptible_timeout(Controller->HealthStatusWaitQueue,
!(Controller->V2.HealthStatusBuffer->StatusChangeCounter
@@ -7046,7 +7034,7 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
DAC960_MonitoringTimerInterval);
if (ErrorCode == -ERESTARTSYS) {
ErrorCode = -EINTR;
- break;
+ goto out;
}
if (copy_to_user(GetHealthStatus.HealthStatusBuffer,
Controller->V2.HealthStatusBuffer,
@@ -7054,7 +7042,39 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
ErrorCode = -EFAULT;
else
ErrorCode = 0;
- }
+
+out:
+ return ErrorCode;
+}
+
+/*
+ * DAC960_gam_ioctl is the ioctl function for performing RAID operations.
+*/
+
+static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
+ unsigned long Argument)
+{
+ long ErrorCode = 0;
+ void __user *argp = (void __user *)Argument;
+ if (!capable(CAP_SYS_ADMIN)) return -EACCES;
+
+ mutex_lock(&DAC960_mutex);
+ switch (Request)
+ {
+ case DAC960_IOCTL_GET_CONTROLLER_COUNT:
+ ErrorCode = DAC960_ControllerCount;
+ break;
+ case DAC960_IOCTL_GET_CONTROLLER_INFO:
+ ErrorCode = DAC960_gam_get_controller_info(argp);
+ break;
+ case DAC960_IOCTL_V1_EXECUTE_COMMAND:
+ ErrorCode = DAC960_gam_v1_execute_command(argp);
+ break;
+ case DAC960_IOCTL_V2_EXECUTE_COMMAND:
+ ErrorCode = DAC960_gam_v2_execute_command(argp);
+ break;
+ case DAC960_IOCTL_V2_GET_HEALTH_STATUS:
+ ErrorCode = DAC960_gam_v2_get_health_status(argp);
break;
default:
ErrorCode = -ENOTTY;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 40579d0..ad9b687 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -20,6 +20,10 @@ config BLK_DEV_NULL_BLK
tristate "Null test block driver"
select CONFIGFS_FS
+config BLK_DEV_NULL_BLK_FAULT_INJECTION
+ bool "Support fault injection for Null test block driver"
+ depends on BLK_DEV_NULL_BLK && FAULT_INJECTION
+
config BLK_DEV_FD
tristate "Normal floppy disk support"
depends on ARCH_MAY_HAVE_PC_FDC
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 9220f8e..c0ebda1 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -112,8 +112,7 @@ enum frame_flags {
struct frame {
struct list_head head;
u32 tag;
- struct timeval sent; /* high-res time packet was sent */
- u32 sent_jiffs; /* low-res jiffies-based sent time */
+ ktime_t sent; /* high-res time packet was sent */
ulong waited;
ulong waited_total;
struct aoetgt *t; /* parent target I belong to */
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 812fed0..540bb60 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -398,8 +398,7 @@ aoecmd_ata_rw(struct aoedev *d)
skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
+ f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
@@ -489,8 +488,7 @@ resend(struct aoedev *d, struct frame *f)
skb = skb_clone(skb, GFP_ATOMIC);
if (skb == NULL)
return;
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
+ f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
@@ -499,33 +497,17 @@ resend(struct aoedev *d, struct frame *f)
static int
tsince_hr(struct frame *f)
{
- struct timeval now;
- int n;
+ u64 delta = ktime_to_ns(ktime_sub(ktime_get(), f->sent));
- do_gettimeofday(&now);
- n = now.tv_usec - f->sent.tv_usec;
- n += (now.tv_sec - f->sent.tv_sec) * USEC_PER_SEC;
+ /* delta is normally under 4.2 seconds, avoid 64-bit division */
+ if (likely(delta <= UINT_MAX))
+ return (u32)delta / NSEC_PER_USEC;
- if (n < 0)
- n = -n;
+ /* avoid overflow after 71 minutes */
+ if (delta > ((u64)INT_MAX * NSEC_PER_USEC))
+ return INT_MAX;
- /* For relatively long periods, use jiffies to avoid
- * discrepancies caused by updates to the system time.
- *
- * On system with HZ of 1000, 32-bits is over 49 days
- * worth of jiffies, or over 71 minutes worth of usecs.
- *
- * Jiffies overflow is handled by subtraction of unsigned ints:
- * (gdb) print (unsigned) 2 - (unsigned) 0xfffffffe
- * $3 = 4
- * (gdb)
- */
- if (n > USEC_PER_SEC / 4) {
- n = ((u32) jiffies) - f->sent_jiffs;
- n *= USEC_PER_SEC / HZ;
- }
-
- return n;
+ return div_u64(delta, NSEC_PER_USEC);
}
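
tsince_hr() now measures a frame's age on the monotonic clock, which removes both sources of error the old code worked around: do_gettimeofday() jumping under settimeofday()/NTP, and the low-resolution jiffies fallback. The shape of the pattern in isolation (sketch):

    /* Elapsed microseconds since @sent, saturating at INT_MAX. */
    static int elapsed_us(ktime_t sent)
    {
            u64 delta = ktime_to_ns(ktime_sub(ktime_get(), sent));

            /* Common case: 32-bit division suffices below ~4.3 s. */
            if (likely(delta <= UINT_MAX))
                    return (u32)delta / NSEC_PER_USEC;

            if (delta > (u64)INT_MAX * NSEC_PER_USEC)
                    return INT_MAX;

            return div_u64(delta, NSEC_PER_USEC);
    }
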
static int
@@ -589,7 +571,6 @@ reassign_frame(struct frame *f)
nf->waited = 0;
nf->waited_total = f->waited_total;
nf->sent = f->sent;
- nf->sent_jiffs = f->sent_jiffs;
f->skb = skb;
return nf;
@@ -633,8 +614,7 @@ probe(struct aoetgt *t)
skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
+ f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
@@ -1432,10 +1412,8 @@ aoecmd_ata_id(struct aoedev *d)
d->timer.function = rexmit_timer;
skb = skb_clone(skb, GFP_ATOMIC);
- if (skb) {
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
- }
+ if (skb)
+ f->sent = ktime_get();
return skb;
}
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index bd97908..9f4e6f5 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -953,7 +953,7 @@ static void drbd_bm_endio(struct bio *bio)
struct drbd_bm_aio_ctx *ctx = bio->bi_private;
struct drbd_device *device = ctx->device;
struct drbd_bitmap *b = device->bitmap;
- unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
+ unsigned int idx = bm_page_to_idx(bio_first_page_all(bio));
if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
!bm_test_page_unchanged(b->bm_pages[idx]))
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index bc8e615..d5fe720 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1581,9 +1581,8 @@ out:
return err;
}
-static void lo_release(struct gendisk *disk, fmode_t mode)
+static void __lo_release(struct loop_device *lo)
{
- struct loop_device *lo = disk->private_data;
int err;
if (atomic_dec_return(&lo->lo_refcnt))
@@ -1610,6 +1609,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
mutex_unlock(&lo->lo_ctl_mutex);
}
+static void lo_release(struct gendisk *disk, fmode_t mode)
+{
+ mutex_lock(&loop_index_mutex);
+ __lo_release(disk->private_data);
+ mutex_unlock(&loop_index_mutex);
+}
+
static const struct block_device_operations lo_fops = {
.owner = THIS_MODULE,
.open = lo_open,
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index ccb9975..6655893 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -12,9 +12,9 @@
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
-#include <linux/lightnvm.h>
#include <linux/configfs.h>
#include <linux/badblocks.h>
+#include <linux/fault-inject.h>
#define SECTOR_SHIFT 9
#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
@@ -27,6 +27,10 @@
#define TICKS_PER_SEC 50ULL
#define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC)
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+static DECLARE_FAULT_ATTR(null_timeout_attr);
+#endif
+
static inline u64 mb_per_tick(int mbps)
{
return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
@@ -35,13 +39,13 @@ static inline u64 mb_per_tick(int mbps)
struct nullb_cmd {
struct list_head list;
struct llist_node ll_list;
- call_single_data_t csd;
+ struct __call_single_data csd;
struct request *rq;
struct bio *bio;
unsigned int tag;
+ blk_status_t error;
struct nullb_queue *nq;
struct hrtimer timer;
- blk_status_t error;
};
struct nullb_queue {
@@ -107,7 +111,6 @@ struct nullb_device {
unsigned int hw_queue_depth; /* queue depth */
unsigned int index; /* index of the disk, only valid with a disk */
unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
- bool use_lightnvm; /* register as a LightNVM device */
bool blocking; /* blocking blk-mq device */
bool use_per_node_hctx; /* use per-node allocation for hardware context */
bool power; /* power on/off the device */
@@ -121,7 +124,6 @@ struct nullb {
unsigned int index;
struct request_queue *q;
struct gendisk *disk;
- struct nvm_dev *ndev;
struct blk_mq_tag_set *tag_set;
struct blk_mq_tag_set __tag_set;
unsigned int queue_depth;
@@ -139,7 +141,6 @@ static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
-static struct kmem_cache *ppa_cache;
static struct blk_mq_tag_set tag_set;
enum {
@@ -166,6 +167,11 @@ static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+static char g_timeout_str[80];
+module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), S_IRUGO);
+#endif
+
static int g_queue_mode = NULL_Q_MQ;
static int null_param_store_val(const char *str, int *val, int min, int max)
@@ -208,10 +214,6 @@ static int nr_devices = 1;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");
-static bool g_use_lightnvm;
-module_param_named(use_lightnvm, g_use_lightnvm, bool, S_IRUGO);
-MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");
-
static bool g_blocking;
module_param_named(blocking, g_blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
@@ -345,7 +347,6 @@ NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
-NULLB_DEVICE_ATTR(use_lightnvm, bool);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
@@ -455,7 +456,6 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_irqmode,
&nullb_device_attr_hw_queue_depth,
&nullb_device_attr_index,
- &nullb_device_attr_use_lightnvm,
&nullb_device_attr_blocking,
&nullb_device_attr_use_per_node_hctx,
&nullb_device_attr_power,
@@ -573,7 +573,6 @@ static struct nullb_device *null_alloc_dev(void)
dev->blocksize = g_bs;
dev->irqmode = g_irqmode;
dev->hw_queue_depth = g_hw_queue_depth;
- dev->use_lightnvm = g_use_lightnvm;
dev->blocking = g_blocking;
dev->use_per_node_hctx = g_use_per_node_hctx;
return dev;
@@ -1352,6 +1351,12 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
+static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
+{
+ pr_info("null: rq %p timed out\n", rq);
+ return BLK_EH_HANDLED;
+}
+
static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
struct nullb *nullb = q->queuedata;
@@ -1369,6 +1374,16 @@ static int null_rq_prep_fn(struct request_queue *q, struct request *req)
return BLKPREP_DEFER;
}
+static bool should_timeout_request(struct request *rq)
+{
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+ if (g_timeout_str[0])
+ return should_fail(&null_timeout_attr, 1);
+#endif
+
+ return false;
+}
+
static void null_request_fn(struct request_queue *q)
{
struct request *rq;
@@ -1376,12 +1391,20 @@ static void null_request_fn(struct request_queue *q)
while ((rq = blk_fetch_request(q)) != NULL) {
struct nullb_cmd *cmd = rq->special;
- spin_unlock_irq(q->queue_lock);
- null_handle_cmd(cmd);
- spin_lock_irq(q->queue_lock);
+ if (!should_timeout_request(rq)) {
+ spin_unlock_irq(q->queue_lock);
+ null_handle_cmd(cmd);
+ spin_lock_irq(q->queue_lock);
+ }
}
}
+static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
+{
+ pr_info("null: rq %p timed out\n", rq);
+ return BLK_EH_HANDLED;
+}
+
static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -1399,12 +1422,16 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(bd->rq);
- return null_handle_cmd(cmd);
+ if (!should_timeout_request(bd->rq))
+ return null_handle_cmd(cmd);
+
+ return BLK_STS_OK;
}
static const struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
.complete = null_softirq_done_fn,
+ .timeout = null_timeout_rq,
};
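
should_timeout_request() wires the generic fault-injection framework into both I/O paths: with a timeout attribute configured, should_fail() decides per request whether to silently never complete it, and one of the new timeout handlers then fires once the block layer notices. The module-parameter string is presumably parsed at init time with setup_fault_attr(), which takes the usual "<interval>,<probability>,<space>,<times>" format (sketch; the helper name null_setup_fault is hypothetical):

    static int null_setup_fault(void)
    {
    #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
            if (g_timeout_str[0] &&
                !setup_fault_attr(&null_timeout_attr, g_timeout_str))
                    return -EINVAL;
    #endif
            return 0;
    }

Loading with timeout="1,100,0,-1", for instance, would time out every request until the module is reloaded.
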
static void cleanup_queue(struct nullb_queue *nq)
@@ -1423,170 +1450,6 @@ static void cleanup_queues(struct nullb *nullb)
kfree(nullb->queues);
}
-#ifdef CONFIG_NVM
-
-static void null_lnvm_end_io(struct request *rq, blk_status_t status)
-{
- struct nvm_rq *rqd = rq->end_io_data;
-
- /* XXX: lighnvm core seems to expect NVM_RSP_* values here.. */
- rqd->error = status ? -EIO : 0;
- nvm_end_io(rqd);
-
- blk_put_request(rq);
-}
-
-static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
- struct request_queue *q = dev->q;
- struct request *rq;
- struct bio *bio = rqd->bio;
-
- rq = blk_mq_alloc_request(q,
- op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq))
- return -ENOMEM;
-
- blk_init_request_from_bio(rq, bio);
-
- rq->end_io_data = rqd;
-
- blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);
-
- return 0;
-}
-
-static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
-{
- struct nullb *nullb = dev->q->queuedata;
- sector_t size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
- sector_t blksize;
- struct nvm_id_group *grp;
-
- id->ver_id = 0x1;
- id->vmnt = 0;
- id->cap = 0x2;
- id->dom = 0x1;
-
- id->ppaf.blk_offset = 0;
- id->ppaf.blk_len = 16;
- id->ppaf.pg_offset = 16;
- id->ppaf.pg_len = 16;
- id->ppaf.sect_offset = 32;
- id->ppaf.sect_len = 8;
- id->ppaf.pln_offset = 40;
- id->ppaf.pln_len = 8;
- id->ppaf.lun_offset = 48;
- id->ppaf.lun_len = 8;
- id->ppaf.ch_offset = 56;
- id->ppaf.ch_len = 8;
-
- sector_div(size, nullb->dev->blocksize); /* convert size to pages */
- size >>= 8; /* concert size to pgs pr blk */
- grp = &id->grp;
- grp->mtype = 0;
- grp->fmtype = 0;
- grp->num_ch = 1;
- grp->num_pg = 256;
- blksize = size;
- size >>= 16;
- grp->num_lun = size + 1;
- sector_div(blksize, grp->num_lun);
- grp->num_blk = blksize;
- grp->num_pln = 1;
-
- grp->fpg_sz = nullb->dev->blocksize;
- grp->csecs = nullb->dev->blocksize;
- grp->trdt = 25000;
- grp->trdm = 25000;
- grp->tprt = 500000;
- grp->tprm = 500000;
- grp->tbet = 1500000;
- grp->tbem = 1500000;
- grp->mpos = 0x010101; /* single plane rwe */
- grp->cpar = nullb->dev->hw_queue_depth;
-
- return 0;
-}
-
-static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
-{
- mempool_t *virtmem_pool;
-
- virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
- if (!virtmem_pool) {
- pr_err("null_blk: Unable to create virtual memory pool\n");
- return NULL;
- }
-
- return virtmem_pool;
-}
-
-static void null_lnvm_destroy_dma_pool(void *pool)
-{
- mempool_destroy(pool);
-}
-
-static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
- gfp_t mem_flags, dma_addr_t *dma_handler)
-{
- return mempool_alloc(pool, mem_flags);
-}
-
-static void null_lnvm_dev_dma_free(void *pool, void *entry,
- dma_addr_t dma_handler)
-{
- mempool_free(entry, pool);
-}
-
-static struct nvm_dev_ops null_lnvm_dev_ops = {
- .identity = null_lnvm_id,
- .submit_io = null_lnvm_submit_io,
-
- .create_dma_pool = null_lnvm_create_dma_pool,
- .destroy_dma_pool = null_lnvm_destroy_dma_pool,
- .dev_dma_alloc = null_lnvm_dev_dma_alloc,
- .dev_dma_free = null_lnvm_dev_dma_free,
-
- /* Simulate nvme protocol restriction */
- .max_phys_sect = 64,
-};
-
-static int null_nvm_register(struct nullb *nullb)
-{
- struct nvm_dev *dev;
- int rv;
-
- dev = nvm_alloc_dev(0);
- if (!dev)
- return -ENOMEM;
-
- dev->q = nullb->q;
- memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
- dev->ops = &null_lnvm_dev_ops;
-
- rv = nvm_register(dev);
- if (rv) {
- kfree(dev);
- return rv;
- }
- nullb->ndev = dev;
- return 0;
-}
-
-static void null_nvm_unregister(struct nullb *nullb)
-{
- nvm_unregister(nullb->ndev);
-}
-#else
-static int null_nvm_register(struct nullb *nullb)
-{
- pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
- return -EINVAL;
-}
-static void null_nvm_unregister(struct nullb *nullb) {}
-#endif /* CONFIG_NVM */
-
static void null_del_dev(struct nullb *nullb)
{
struct nullb_device *dev = nullb->dev;
@@ -1595,10 +1458,7 @@ static void null_del_dev(struct nullb *nullb)
list_del_init(&nullb->list);
- if (dev->use_lightnvm)
- null_nvm_unregister(nullb);
- else
- del_gendisk(nullb->disk);
+ del_gendisk(nullb->disk);
if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
hrtimer_cancel(&nullb->bw_timer);
@@ -1610,8 +1470,7 @@ static void null_del_dev(struct nullb *nullb)
if (dev->queue_mode == NULL_Q_MQ &&
nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
- if (!dev->use_lightnvm)
- put_disk(nullb->disk);
+ put_disk(nullb->disk);
cleanup_queues(nullb);
if (null_cache_active(nullb))
null_free_device_storage(nullb->dev, true);
@@ -1775,11 +1634,6 @@ static void null_validate_conf(struct nullb_device *dev)
{
dev->blocksize = round_down(dev->blocksize, 512);
dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
- if (dev->use_lightnvm && dev->blocksize != 4096)
- dev->blocksize = 4096;
-
- if (dev->use_lightnvm && dev->queue_mode != NULL_Q_MQ)
- dev->queue_mode = NULL_Q_MQ;
if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
if (dev->submit_queues != nr_online_nodes)
@@ -1805,6 +1659,20 @@ static void null_validate_conf(struct nullb_device *dev)
dev->mbps = 0;
}
+static bool null_setup_fault(void)
+{
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+ if (!g_timeout_str[0])
+ return true;
+
+ if (!setup_fault_attr(&null_timeout_attr, g_timeout_str))
+ return false;
+
+ null_timeout_attr.verbose = 0;
+#endif
+ return true;
+}
+
static int null_add_dev(struct nullb_device *dev)
{
struct nullb *nullb;
@@ -1838,6 +1706,10 @@ static int null_add_dev(struct nullb_device *dev)
if (rv)
goto out_cleanup_queues;
+ if (!null_setup_fault())
+ goto out_cleanup_queues;
+
+ nullb->tag_set->timeout = 5 * HZ;
nullb->q = blk_mq_init_queue(nullb->tag_set);
if (IS_ERR(nullb->q)) {
rv = -ENOMEM;
@@ -1861,8 +1733,14 @@ static int null_add_dev(struct nullb_device *dev)
rv = -ENOMEM;
goto out_cleanup_queues;
}
+
+ if (!null_setup_fault())
+ goto out_cleanup_blk_queue;
+
blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+ blk_queue_rq_timed_out(nullb->q, null_rq_timed_out_fn);
+ nullb->q->rq_timeout = 5 * HZ;
rv = init_driver_queues(nullb);
if (rv)
goto out_cleanup_blk_queue;
@@ -1895,11 +1773,7 @@ static int null_add_dev(struct nullb_device *dev)
sprintf(nullb->disk_name, "nullb%d", nullb->index);
- if (dev->use_lightnvm)
- rv = null_nvm_register(nullb);
- else
- rv = null_gendisk_register(nullb);
-
+ rv = null_gendisk_register(nullb);
if (rv)
goto out_cleanup_blk_queue;
@@ -1938,18 +1812,6 @@ static int __init null_init(void)
g_bs = PAGE_SIZE;
}
- if (g_use_lightnvm && g_bs != 4096) {
- pr_warn("null_blk: LightNVM only supports 4k block size\n");
- pr_warn("null_blk: defaults block size to 4k\n");
- g_bs = 4096;
- }
-
- if (g_use_lightnvm && g_queue_mode != NULL_Q_MQ) {
- pr_warn("null_blk: LightNVM only supported for blk-mq\n");
- pr_warn("null_blk: defaults queue mode to blk-mq\n");
- g_queue_mode = NULL_Q_MQ;
- }
-
if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
if (g_submit_queues != nr_online_nodes) {
pr_warn("null_blk: submit_queues param is set to %u.\n",
@@ -1982,16 +1844,6 @@ static int __init null_init(void)
goto err_conf;
}
- if (g_use_lightnvm) {
- ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
- 0, 0, NULL);
- if (!ppa_cache) {
- pr_err("null_blk: unable to create ppa cache\n");
- ret = -ENOMEM;
- goto err_ppa;
- }
- }
-
for (i = 0; i < nr_devices; i++) {
dev = null_alloc_dev();
if (!dev) {
@@ -2015,8 +1867,6 @@ err_dev:
null_del_dev(nullb);
null_free_dev(dev);
}
- kmem_cache_destroy(ppa_cache);
-err_ppa:
unregister_blkdev(null_major, "nullb");
err_conf:
configfs_unregister_subsystem(&nullb_subsys);
@@ -2047,8 +1897,6 @@ static void __exit null_exit(void)
if (g_queue_mode == NULL_Q_MQ && shared_tags)
blk_mq_free_tag_set(&tag_set);
-
- kmem_cache_destroy(ppa_cache);
}
module_init(null_init);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 6797479..531a091 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2579,14 +2579,14 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
bdev = bdget(dev);
if (!bdev)
return -ENOMEM;
+ ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
+ if (ret)
+ return ret;
if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
- bdput(bdev);
+ blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
return -EINVAL;
}
- ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
- if (ret)
- return ret;
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
@@ -2745,7 +2745,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
pd->pkt_dev = MKDEV(pktdev_major, idx);
ret = pkt_new_dev(pd, dev);
if (ret)
- goto out_new_dev;
+ goto out_mem2;
/* inherit events of the host device */
disk->events = pd->bdev->bd_disk->events;
@@ -2763,8 +2763,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
mutex_unlock(&ctl_mutex);
return 0;
-out_new_dev:
- blk_cleanup_queue(disk->queue);
out_mem2:
put_disk(disk);
out_mem:
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 38fc5f3..cc93522 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3047,13 +3047,21 @@ static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
mutex_unlock(&rbd_dev->watch_mutex);
}
+static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
+{
+ struct rbd_client_id cid = rbd_get_cid(rbd_dev);
+
+ strcpy(rbd_dev->lock_cookie, cookie);
+ rbd_set_owner_cid(rbd_dev, &cid);
+ queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+}
+
/*
* lock_rwsem must be held for write
*/
static int rbd_lock(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
- struct rbd_client_id cid = rbd_get_cid(rbd_dev);
char cookie[32];
int ret;
@@ -3068,9 +3076,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
return ret;
rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
- strcpy(rbd_dev->lock_cookie, cookie);
- rbd_set_owner_cid(rbd_dev, &cid);
- queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+ __rbd_lock(rbd_dev, cookie);
return 0;
}
@@ -3856,7 +3862,7 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
queue_delayed_work(rbd_dev->task_wq,
&rbd_dev->lock_dwork, 0);
} else {
- strcpy(rbd_dev->lock_cookie, cookie);
+ __rbd_lock(rbd_dev, cookie);
}
}
@@ -4381,7 +4387,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
segment_size = rbd_obj_bytes(&rbd_dev->header);
blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
q->limits.max_sectors = queue_max_hw_sectors(q);
- blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
+ blk_queue_max_segments(q, USHRT_MAX);
blk_queue_max_segment_size(q, segment_size);
blk_queue_io_min(q, segment_size);
blk_queue_io_opt(q, segment_size);
diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
deleted file mode 100644
index e5565fb..0000000
--- a/drivers/block/smart1,2.h
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * Disk Array driver for Compaq SMART2 Controllers
- * Copyright 1998 Compaq Computer Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- * If you want to make changes, improve or add functionality to this
- * driver, you'll probably need the Compaq Array Controller Interface
- * Specification (Document number ECG086/1198)
- */
-
-/*
- * This file contains the controller communication implementation for
- * Compaq SMART-1 and SMART-2 controllers. To the best of my knowledge,
- * this should support:
- *
- * PCI:
- * SMART-2/P, SMART-2DH, SMART-2SL, SMART-221, SMART-3100ES, SMART-3200
- * Integrated SMART Array Controller, SMART-4200, SMART-4250ES
- *
- * EISA:
- * SMART-2/E, SMART, IAES, IDA-2, IDA
- */
-
-/*
- * Memory mapped FIFO interface (SMART 42xx cards)
- */
-static void smart4_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- writel(c->busaddr, h->vaddr + S42XX_REQUEST_PORT_OFFSET);
-}
-
-/*
- * This card is the opposite of the other cards.
- * 0 turns interrupts on...
- * 0x08 turns them off...
- */
-static void smart4_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- if (val)
- { /* Turn interrupts on */
- writel(0, h->vaddr + S42XX_REPLY_INTR_MASK_OFFSET);
- } else /* Turn them off */
- {
- writel( S42XX_INTR_OFF,
- h->vaddr + S42XX_REPLY_INTR_MASK_OFFSET);
- }
-}
-
-/*
- * For older cards FIFO Full = 0.
- * On this card 0 means there is room, anything else FIFO Full.
- *
- */
-static unsigned long smart4_fifo_full(ctlr_info_t *h)
-{
-
- return (!readl(h->vaddr + S42XX_REQUEST_PORT_OFFSET));
-}
-
-/* This type of controller returns -1 if the fifo is empty,
- * Not 0 like the others.
- * And we need to let it know we read a value out
- */
-static unsigned long smart4_completed(ctlr_info_t *h)
-{
- long register_value
- = readl(h->vaddr + S42XX_REPLY_PORT_OFFSET);
-
- /* Fifo is empty */
- if( register_value == 0xffffffff)
- return 0;
-
- /* Need to let it know we got the reply */
- /* We do this by writing a 0 to the port we just read from */
- writel(0, h->vaddr + S42XX_REPLY_PORT_OFFSET);
-
- return ((unsigned long) register_value);
-}
-
- /*
- * This hardware returns interrupt pending at a different place and
- * it does not tell us if the fifo is empty; we will have to check
- * that by getting a 0 back from the command_completed call.
- */
-static unsigned long smart4_intr_pending(ctlr_info_t *h)
-{
- unsigned long register_value =
- readl(h->vaddr + S42XX_INTR_STATUS);
-
- if( register_value & S42XX_INTR_PENDING)
- return FIFO_NOT_EMPTY;
- return 0 ;
-}
-
-static struct access_method smart4_access = {
- smart4_submit_command,
- smart4_intr_mask,
- smart4_fifo_full,
- smart4_intr_pending,
- smart4_completed,
-};
-
-/*
- * Memory mapped FIFO interface (PCI SMART2 and SMART 3xxx cards)
- */
-static void smart2_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- writel(c->busaddr, h->vaddr + COMMAND_FIFO);
-}
-
-static void smart2_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- writel(val, h->vaddr + INTR_MASK);
-}
-
-static unsigned long smart2_fifo_full(ctlr_info_t *h)
-{
- return readl(h->vaddr + COMMAND_FIFO);
-}
-
-static unsigned long smart2_completed(ctlr_info_t *h)
-{
- return readl(h->vaddr + COMMAND_COMPLETE_FIFO);
-}
-
-static unsigned long smart2_intr_pending(ctlr_info_t *h)
-{
- return readl(h->vaddr + INTR_PENDING);
-}
-
-static struct access_method smart2_access = {
- smart2_submit_command,
- smart2_intr_mask,
- smart2_fifo_full,
- smart2_intr_pending,
- smart2_completed,
-};
-
-/*
- * IO access for SMART-2/E cards
- */
-static void smart2e_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- outl(c->busaddr, h->io_mem_addr + COMMAND_FIFO);
-}
-
-static void smart2e_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- outl(val, h->io_mem_addr + INTR_MASK);
-}
-
-static unsigned long smart2e_fifo_full(ctlr_info_t *h)
-{
- return inl(h->io_mem_addr + COMMAND_FIFO);
-}
-
-static unsigned long smart2e_completed(ctlr_info_t *h)
-{
- return inl(h->io_mem_addr + COMMAND_COMPLETE_FIFO);
-}
-
-static unsigned long smart2e_intr_pending(ctlr_info_t *h)
-{
- return inl(h->io_mem_addr + INTR_PENDING);
-}
-
-static struct access_method smart2e_access = {
- smart2e_submit_command,
- smart2e_intr_mask,
- smart2e_fifo_full,
- smart2e_intr_pending,
- smart2e_completed,
-};
-
-/*
- * IO access for older SMART-1 type cards
- */
-#define SMART1_SYSTEM_MASK 0xC8E
-#define SMART1_SYSTEM_DOORBELL 0xC8F
-#define SMART1_LOCAL_MASK 0xC8C
-#define SMART1_LOCAL_DOORBELL 0xC8D
-#define SMART1_INTR_MASK 0xC89
-#define SMART1_LISTADDR 0xC90
-#define SMART1_LISTLEN 0xC94
-#define SMART1_TAG 0xC97
-#define SMART1_COMPLETE_ADDR 0xC98
-#define SMART1_LISTSTATUS 0xC9E
-
-#define CHANNEL_BUSY 0x01
-#define CHANNEL_CLEAR 0x02
-
-static void smart1_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- /*
- * This __u16 is actually a bunch of control flags on SMART
- * and below. We want them all to be zero.
- */
- c->hdr.size = 0;
-
- outb(CHANNEL_CLEAR, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
-
- outl(c->busaddr, h->io_mem_addr + SMART1_LISTADDR);
- outw(c->size, h->io_mem_addr + SMART1_LISTLEN);
-
- outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
-}
-
-static void smart1_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- if (val == 1) {
- outb(0xFD, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
- outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
- outb(0x01, h->io_mem_addr + SMART1_INTR_MASK);
- outb(0x01, h->io_mem_addr + SMART1_SYSTEM_MASK);
- } else {
- outb(0, h->io_mem_addr + 0xC8E);
- }
-}
-
-static unsigned long smart1_fifo_full(ctlr_info_t *h)
-{
- unsigned char chan;
- chan = inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_CLEAR;
- return chan;
-}
-
-static unsigned long smart1_completed(ctlr_info_t *h)
-{
- unsigned char status;
- unsigned long cmd;
-
- if (inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_BUSY) {
- outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
-
- cmd = inl(h->io_mem_addr + SMART1_COMPLETE_ADDR);
- status = inb(h->io_mem_addr + SMART1_LISTSTATUS);
-
- outb(CHANNEL_CLEAR, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
-
- /*
- * this is x86 (actually compaq x86) only, so it's ok
- */
- if (cmd) ((cmdlist_t*)bus_to_virt(cmd))->req.hdr.rcode = status;
- } else {
- cmd = 0;
- }
- return cmd;
-}
-
-static unsigned long smart1_intr_pending(ctlr_info_t *h)
-{
- unsigned char chan;
- chan = inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_BUSY;
- return chan;
-}
-
-static struct access_method smart1_access = {
- smart1_submit_command,
- smart1_intr_mask,
- smart1_fifo_full,
- smart1_intr_pending,
- smart1_completed,
-};
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index d70eba3..0afa6c8 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -430,7 +430,7 @@ static void put_entry_bdev(struct zram *zram, unsigned long entry)
static void zram_page_end_io(struct bio *bio)
{
- struct page *page = bio->bi_io_vec[0].bv_page;
+ struct page *page = bio_first_page_all(bio);
page_endio(page, op_is_write(bio_op(bio)),
blk_status_to_errno(bio->bi_status));
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index dc7b3c7..57e011d 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -120,7 +120,7 @@ config QCOM_EBI2
SRAM, ethernet adapters, FPGAs and LCD displays.
config SIMPLE_PM_BUS
- bool "Simple Power-Managed Bus Driver"
+ tristate "Simple Power-Managed Bus Driver"
depends on OF && PM
help
Driver for transparent busses that don't need a real driver, but
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 328ca93..1b76d95 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -178,6 +178,7 @@ static struct bus_type sunxi_rsb_bus = {
.match = sunxi_rsb_device_match,
.probe = sunxi_rsb_device_probe,
.remove = sunxi_rsb_device_remove,
+ .uevent = of_device_uevent_modalias,
};
static void sunxi_rsb_dev_release(struct device *dev)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 779869e..71fad74 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -199,6 +199,9 @@ struct smi_info {
/* The timer for this si. */
struct timer_list si_timer;
+ /* This flag is set if the timer is allowed to be started */
+ bool timer_can_start;
+
/* This flag is set, if the timer is running (timer_pending() isn't enough) */
bool timer_running;
@@ -355,6 +358,8 @@ out:
static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
{
+ if (!smi_info->timer_can_start)
+ return;
smi_info->last_timeout_jiffies = jiffies;
mod_timer(&smi_info->si_timer, new_val);
smi_info->timer_running = true;
@@ -374,21 +379,18 @@ static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
}
-static void start_check_enables(struct smi_info *smi_info, bool start_timer)
+static void start_check_enables(struct smi_info *smi_info)
{
unsigned char msg[2];
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
- if (start_timer)
- start_new_msg(smi_info, msg, 2);
- else
- smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+ start_new_msg(smi_info, msg, 2);
smi_info->si_state = SI_CHECKING_ENABLES;
}
-static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
+static void start_clear_flags(struct smi_info *smi_info)
{
unsigned char msg[3];
@@ -397,10 +399,7 @@ static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
msg[2] = WDT_PRE_TIMEOUT_INT;
- if (start_timer)
- start_new_msg(smi_info, msg, 3);
- else
- smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+ start_new_msg(smi_info, msg, 3);
smi_info->si_state = SI_CLEARING_FLAGS;
}
@@ -435,11 +434,11 @@ static void start_getting_events(struct smi_info *smi_info)
* Note that we cannot just use disable_irq(), since the interrupt may
* be shared.
*/
-static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
+static inline bool disable_si_irq(struct smi_info *smi_info)
{
if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
smi_info->interrupt_disabled = true;
- start_check_enables(smi_info, start_timer);
+ start_check_enables(smi_info);
return true;
}
return false;
@@ -449,7 +448,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
{
if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
smi_info->interrupt_disabled = false;
- start_check_enables(smi_info, true);
+ start_check_enables(smi_info);
return true;
}
return false;
@@ -467,7 +466,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
msg = ipmi_alloc_smi_msg();
if (!msg) {
- if (!disable_si_irq(smi_info, true))
+ if (!disable_si_irq(smi_info))
smi_info->si_state = SI_NORMAL;
} else if (enable_si_irq(smi_info)) {
ipmi_free_smi_msg(msg);
@@ -483,7 +482,7 @@ retry:
/* Watchdog pre-timeout */
smi_inc_stat(smi_info, watchdog_pretimeouts);
- start_clear_flags(smi_info, true);
+ start_clear_flags(smi_info);
smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
if (smi_info->intf)
ipmi_smi_watchdog_pretimeout(smi_info->intf);
@@ -866,7 +865,7 @@ restart:
* disable and messages disabled.
*/
if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
- start_check_enables(smi_info, true);
+ start_check_enables(smi_info);
} else {
smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
if (!smi_info->curr_msg)
@@ -1167,6 +1166,7 @@ static int smi_start_processing(void *send_info,
/* Set up the timer that drives the interface. */
timer_setup(&new_smi->si_timer, smi_timeout, 0);
+ new_smi->timer_can_start = true;
smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
/* Try to claim any interrupts. */
@@ -1936,10 +1936,12 @@ static void check_for_broken_irqs(struct smi_info *smi_info)
check_set_rcv_irq(smi_info);
}
-static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
+static inline void stop_timer_and_thread(struct smi_info *smi_info)
{
if (smi_info->thread != NULL)
kthread_stop(smi_info->thread);
+
+ smi_info->timer_can_start = false;
if (smi_info->timer_running)
del_timer_sync(&smi_info->si_timer);
}
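The ordering in stop_timer_and_thread() is the standard pattern for stopping a self-rearming timer: clear the gate first so a concurrently running handler cannot rearm the timer, then del_timer_sync() to wait out any handler still in flight. A minimal generic sketch with hypothetical names, not driver code:

	#include <linux/timer.h>

	struct rearm_ctx {
		struct timer_list timer;
		bool can_start;			/* checked before every mod_timer() */
	};

	static void rearm_ctx_stop(struct rearm_ctx *ctx)
	{
		ctx->can_start = false;		/* forbid new rearms */
		del_timer_sync(&ctx->timer);	/* wait for a running handler */
	}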
@@ -2152,7 +2154,7 @@ static int try_smi_init(struct smi_info *new_smi)
* Start clearing the flags before we enable interrupts or the
* timer to avoid racing with the timer.
*/
- start_clear_flags(new_smi, false);
+ start_clear_flags(new_smi);
/*
* IRQ is defined to be set when non-zero. req_events will
@@ -2238,7 +2240,7 @@ out_err_remove_attrs:
dev_set_drvdata(new_smi->io.dev, NULL);
out_err_stop_timer:
- wait_for_timer_and_thread(new_smi);
+ stop_timer_and_thread(new_smi);
out_err:
new_smi->interrupt_disabled = true;
@@ -2388,7 +2390,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
*/
if (to_clean->io.irq_cleanup)
to_clean->io.irq_cleanup(&to_clean->io);
- wait_for_timer_and_thread(to_clean);
+ stop_timer_and_thread(to_clean);
/*
* Timeouts are stopped, now make sure the interrupts are off
@@ -2400,7 +2402,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
schedule_timeout_uninterruptible(1);
}
if (to_clean->handlers)
- disable_si_irq(to_clean, false);
+ disable_si_irq(to_clean);
while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
poll(to_clean);
schedule_timeout_uninterruptible(1);
diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c
index 090b073..6b10f0e 100644
--- a/drivers/char/ipmi/ipmi_si_parisc.c
+++ b/drivers/char/ipmi/ipmi_si_parisc.c
@@ -10,6 +10,8 @@ static int __init ipmi_parisc_probe(struct parisc_device *dev)
{
struct si_sm_io io;
+ memset(&io, 0, sizeof(io));
+
io.si_type = SI_KCS;
io.addr_source = SI_DEVICETREE;
io.addr_type = IPMI_MEM_ADDR_SPACE;
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
index 99771f5..27dd11c 100644
--- a/drivers/char/ipmi/ipmi_si_pci.c
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -103,10 +103,13 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
io.addr_source_cleanup = ipmi_pci_cleanup;
io.addr_source_data = pdev;
- if (pci_resource_flags(pdev, 0) & IORESOURCE_IO)
+ if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
io.addr_type = IPMI_IO_ADDR_SPACE;
- else
+ io.io_setup = ipmi_si_port_setup;
+ } else {
io.addr_type = IPMI_MEM_ADDR_SPACE;
+ io.io_setup = ipmi_si_mem_setup;
+ }
io.addr_data = pci_resource_start(pdev, 0);
io.regspacing = ipmi_pci_probe_regspacing(&io);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 647d056..b56c11f 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -220,7 +220,8 @@ static bool clk_core_is_enabled(struct clk_core *core)
ret = core->ops->is_enabled(core->hw);
done:
- clk_pm_runtime_put(core);
+ if (core->dev)
+ pm_runtime_put(core->dev);
return ret;
}
@@ -1564,6 +1565,9 @@ static void clk_change_rate(struct clk_core *core)
best_parent_rate = core->parent->rate;
}
+ if (clk_pm_runtime_get(core))
+ return;
+
if (core->flags & CLK_SET_RATE_UNGATE) {
unsigned long flags;
@@ -1634,6 +1638,8 @@ static void clk_change_rate(struct clk_core *core)
/* handle the new child who might not be in core->children yet */
if (core->new_child)
clk_change_rate(core->new_child);
+
+ clk_pm_runtime_put(core);
}
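A descriptive gloss of the bracketing added to clk_change_rate() (names from the surrounding code):

	/* clk_pm_runtime_get(core)  <- added at entry, bails out on failure
	 * ... rates and dividers are programmed while core->dev is resumed ...
	 * clk_pm_runtime_put(core)  <- added at exit, drops the reference
	 */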
static int clk_core_set_rate_nolock(struct clk_core *core,
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index a1a6342..f00d875 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -83,9 +84,20 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
return 0;
}
+static int sun9i_mmc_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ sun9i_mmc_reset_assert(rcdev, id);
+ udelay(10);
+ sun9i_mmc_reset_deassert(rcdev, id);
+
+ return 0;
+}
+
static const struct reset_control_ops sun9i_mmc_reset_ops = {
.assert = sun9i_mmc_reset_assert,
.deassert = sun9i_mmc_reset_deassert,
+ .reset = sun9i_mmc_reset_reset,
};
static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index c729a88..b3b4ed9 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -269,6 +269,7 @@ config CLKSRC_STM32
bool "Clocksource for STM32 SoCs" if !ARCH_STM32
depends on OF && ARM && (ARCH_STM32 || COMPILE_TEST)
select CLKSRC_MMIO
+ select TIMER_OF
config CLKSRC_MPS2
bool "Clocksource for MPS2 SoCs" if COMPILE_TEST
@@ -441,6 +442,13 @@ config MTK_TIMER
help
Support for Mediatek timer driver.
+config SPRD_TIMER
+ bool "Spreadtrum timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select TIMER_OF
+ help
+ Enables support for the Spreadtrum timer driver.
+
config SYS_SUPPORTS_SH_MTU2
bool
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 72711f1..d6dec44 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
obj-$(CONFIG_OWL_TIMER) += owl-timer.o
+obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
diff --git a/drivers/clocksource/owl-timer.c b/drivers/clocksource/owl-timer.c
index c686305..ea00a5e 100644
--- a/drivers/clocksource/owl-timer.c
+++ b/drivers/clocksource/owl-timer.c
@@ -168,5 +168,6 @@ static int __init owl_timer_init(struct device_node *node)
return 0;
}
-CLOCKSOURCE_OF_DECLARE(owl_s500, "actions,s500-timer", owl_timer_init);
-CLOCKSOURCE_OF_DECLARE(owl_s900, "actions,s900-timer", owl_timer_init);
+TIMER_OF_DECLARE(owl_s500, "actions,s500-timer", owl_timer_init);
+TIMER_OF_DECLARE(owl_s700, "actions,s700-timer", owl_timer_init);
+TIMER_OF_DECLARE(owl_s900, "actions,s900-timer", owl_timer_init);
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 9de47d4..43f4d5c 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -384,7 +384,7 @@ static int __init tcb_clksrc_init(void)
printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
divided_rate / 1000000,
- ((divided_rate + 500000) % 1000000) / 1000);
+ ((divided_rate % 1000000) + 500) / 1000);
if (tc->tcb_config && tc->tcb_config->counter_width == 32) {
 /* use appropriate function to read 32 bit counter */
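A worked example of the rounding fix, with a hypothetical divided_rate of 5812500 Hz (which should print as "5.813 MHz"): the old expression added the rounding term before taking the modulo, corrupting the sub-MHz remainder, while the new one rounds only that remainder:

	/* old: ((5812500 + 500000) % 1000000) / 1000 = 312500 / 1000 = 312 (wrong) */
	/* new: ((5812500 % 1000000) + 500) / 1000    = 813000 / 1000 = 813 (right) */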
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index a319904..06ed88a 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -24,7 +24,13 @@
#include "timer-of.h"
-static __init void timer_irq_exit(struct of_timer_irq *of_irq)
+/**
+ * timer_of_irq_exit - Release the interrupt
+ * @of_irq: an of_timer_irq structure pointer
+ *
+ * Free the irq resource
+ */
+static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
{
struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
@@ -34,8 +40,24 @@ static __init void timer_irq_exit(struct of_timer_irq *of_irq)
free_irq(of_irq->irq, clkevt);
}
-static __init int timer_irq_init(struct device_node *np,
- struct of_timer_irq *of_irq)
+/**
+ * timer_of_irq_init - Request the interrupt
+ * @np: a device tree node pointer
+ * @of_irq: an of_timer_irq structure pointer
+ *
+ * Get the interrupt number from its DT definition and request it.
+ * The interrupt is resolved by trying, in order:
+ *
+ * - Get interrupt number by name
+ * - Get interrupt number by index
+ *
+ * When the interrupt is per CPU, 'request_percpu_irq()' is called,
+ * otherwise 'request_irq()' is used.
+ *
+ * Returns 0 on success, < 0 otherwise
+ */
+static __init int timer_of_irq_init(struct device_node *np,
+ struct of_timer_irq *of_irq)
{
int ret;
struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
@@ -72,15 +94,30 @@ static __init int timer_irq_init(struct device_node *np,
return 0;
}
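As a usage sketch of the name-then-index fallback documented above (hypothetical names; the drivers added later in this diff use the same structure, just with index-based lookup):

	static irqreturn_t my_timer_interrupt(int irq, void *dev_id)
	{
		return IRQ_HANDLED;		/* hypothetical stub */
	}

	static struct timer_of my_to = {
		.flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
		.of_irq = {
			.name	 = "update",	/* tried by name first... */
			.index	 = 0,		/* ...then by index */
			.handler = my_timer_interrupt,
			.flags	 = IRQF_TIMER,
		},
	};
	/* then: ret = timer_of_init(np, &my_to); */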
-static __init void timer_clk_exit(struct of_timer_clk *of_clk)
+/**
+ * timer_of_clk_exit - Release the clock resources
+ * @of_clk: an of_timer_clk structure pointer
+ *
+ * Disables and releases the refcount on the clk
+ */
+static __init void timer_of_clk_exit(struct of_timer_clk *of_clk)
{
of_clk->rate = 0;
clk_disable_unprepare(of_clk->clk);
clk_put(of_clk->clk);
}
-static __init int timer_clk_init(struct device_node *np,
- struct of_timer_clk *of_clk)
+/**
+ * timer_of_clk_init - Initialize the clock resources
+ * @np: a device tree node pointer
+ * @of_clk: an of_timer_clk structure pointer
+ *
+ * Get the clock by name or by index, enable it and get the rate
+ *
+ * Returns 0 on success, < 0 otherwise
+ */
+static __init int timer_of_clk_init(struct device_node *np,
+ struct of_timer_clk *of_clk)
{
int ret;
@@ -116,19 +153,19 @@ out_clk_put:
goto out;
}
-static __init void timer_base_exit(struct of_timer_base *of_base)
+static __init void timer_of_base_exit(struct of_timer_base *of_base)
{
iounmap(of_base->base);
}
-static __init int timer_base_init(struct device_node *np,
- struct of_timer_base *of_base)
+static __init int timer_of_base_init(struct device_node *np,
+ struct of_timer_base *of_base)
{
- const char *name = of_base->name ? of_base->name : np->full_name;
-
- of_base->base = of_io_request_and_map(np, of_base->index, name);
+ of_base->base = of_base->name ?
+ of_io_request_and_map(np, of_base->index, of_base->name) :
+ of_iomap(np, of_base->index);
if (IS_ERR(of_base->base)) {
- pr_err("Failed to iomap (%s)\n", name);
+ pr_err("Failed to iomap (%s)\n", of_base->name);
return PTR_ERR(of_base->base);
}
@@ -141,21 +178,21 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
int flags = 0;
if (to->flags & TIMER_OF_BASE) {
- ret = timer_base_init(np, &to->of_base);
+ ret = timer_of_base_init(np, &to->of_base);
if (ret)
goto out_fail;
flags |= TIMER_OF_BASE;
}
if (to->flags & TIMER_OF_CLOCK) {
- ret = timer_clk_init(np, &to->of_clk);
+ ret = timer_of_clk_init(np, &to->of_clk);
if (ret)
goto out_fail;
flags |= TIMER_OF_CLOCK;
}
if (to->flags & TIMER_OF_IRQ) {
- ret = timer_irq_init(np, &to->of_irq);
+ ret = timer_of_irq_init(np, &to->of_irq);
if (ret)
goto out_fail;
flags |= TIMER_OF_IRQ;
@@ -163,17 +200,20 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
if (!to->clkevt.name)
to->clkevt.name = np->name;
+
+ to->np = np;
+
return ret;
out_fail:
if (flags & TIMER_OF_IRQ)
- timer_irq_exit(&to->of_irq);
+ timer_of_irq_exit(&to->of_irq);
if (flags & TIMER_OF_CLOCK)
- timer_clk_exit(&to->of_clk);
+ timer_of_clk_exit(&to->of_clk);
if (flags & TIMER_OF_BASE)
- timer_base_exit(&to->of_base);
+ timer_of_base_exit(&to->of_base);
return ret;
}
@@ -187,11 +227,11 @@ out_fail:
void __init timer_of_cleanup(struct timer_of *to)
{
if (to->flags & TIMER_OF_IRQ)
- timer_irq_exit(&to->of_irq);
+ timer_of_irq_exit(&to->of_irq);
if (to->flags & TIMER_OF_CLOCK)
- timer_clk_exit(&to->of_clk);
+ timer_of_clk_exit(&to->of_clk);
if (to->flags & TIMER_OF_BASE)
- timer_base_exit(&to->of_base);
+ timer_of_base_exit(&to->of_base);
}
diff --git a/drivers/clocksource/timer-of.h b/drivers/clocksource/timer-of.h
index 3f708f1..a5478f3 100644
--- a/drivers/clocksource/timer-of.h
+++ b/drivers/clocksource/timer-of.h
@@ -33,6 +33,7 @@ struct of_timer_clk {
struct timer_of {
unsigned int flags;
+ struct device_node *np;
struct clock_event_device clkevt;
struct of_timer_base of_base;
struct of_timer_irq of_irq;
diff --git a/drivers/clocksource/timer-sprd.c b/drivers/clocksource/timer-sprd.c
new file mode 100644
index 0000000..ef9ebea
--- /dev/null
+++ b/drivers/clocksource/timer-sprd.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include "timer-of.h"
+
+#define TIMER_NAME "sprd_timer"
+
+#define TIMER_LOAD_LO 0x0
+#define TIMER_LOAD_HI 0x4
+#define TIMER_VALUE_LO 0x8
+#define TIMER_VALUE_HI 0xc
+
+#define TIMER_CTL 0x10
+#define TIMER_CTL_PERIOD_MODE BIT(0)
+#define TIMER_CTL_ENABLE BIT(1)
+#define TIMER_CTL_64BIT_WIDTH BIT(16)
+
+#define TIMER_INT 0x14
+#define TIMER_INT_EN BIT(0)
+#define TIMER_INT_RAW_STS BIT(1)
+#define TIMER_INT_MASK_STS BIT(2)
+#define TIMER_INT_CLR BIT(3)
+
+#define TIMER_VALUE_SHDW_LO 0x18
+#define TIMER_VALUE_SHDW_HI 0x1c
+
+#define TIMER_VALUE_LO_MASK GENMASK(31, 0)
+
+static void sprd_timer_enable(void __iomem *base, u32 flag)
+{
+ u32 val = readl_relaxed(base + TIMER_CTL);
+
+ val |= TIMER_CTL_ENABLE;
+ if (flag & TIMER_CTL_64BIT_WIDTH)
+ val |= TIMER_CTL_64BIT_WIDTH;
+ else
+ val &= ~TIMER_CTL_64BIT_WIDTH;
+
+ if (flag & TIMER_CTL_PERIOD_MODE)
+ val |= TIMER_CTL_PERIOD_MODE;
+ else
+ val &= ~TIMER_CTL_PERIOD_MODE;
+
+ writel_relaxed(val, base + TIMER_CTL);
+}
+
+static void sprd_timer_disable(void __iomem *base)
+{
+ u32 val = readl_relaxed(base + TIMER_CTL);
+
+ val &= ~TIMER_CTL_ENABLE;
+ writel_relaxed(val, base + TIMER_CTL);
+}
+
+static void sprd_timer_update_counter(void __iomem *base, unsigned long cycles)
+{
+ writel_relaxed(cycles & TIMER_VALUE_LO_MASK, base + TIMER_LOAD_LO);
+ writel_relaxed(0, base + TIMER_LOAD_HI);
+}
+
+static void sprd_timer_enable_interrupt(void __iomem *base)
+{
+ writel_relaxed(TIMER_INT_EN, base + TIMER_INT);
+}
+
+static void sprd_timer_clear_interrupt(void __iomem *base)
+{
+ u32 val = readl_relaxed(base + TIMER_INT);
+
+ val |= TIMER_INT_CLR;
+ writel_relaxed(val, base + TIMER_INT);
+}
+
+static int sprd_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *ce)
+{
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_disable(timer_of_base(to));
+ sprd_timer_update_counter(timer_of_base(to), cycles);
+ sprd_timer_enable(timer_of_base(to), 0);
+
+ return 0;
+}
+
+static int sprd_timer_set_periodic(struct clock_event_device *ce)
+{
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_disable(timer_of_base(to));
+ sprd_timer_update_counter(timer_of_base(to), timer_of_period(to));
+ sprd_timer_enable(timer_of_base(to), TIMER_CTL_PERIOD_MODE);
+
+ return 0;
+}
+
+static int sprd_timer_shutdown(struct clock_event_device *ce)
+{
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_disable(timer_of_base(to));
+ return 0;
+}
+
+static irqreturn_t sprd_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *ce = (struct clock_event_device *)dev_id;
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_clear_interrupt(timer_of_base(to));
+
+ if (clockevent_state_oneshot(ce))
+ sprd_timer_disable(timer_of_base(to));
+
+ ce->event_handler(ce);
+ return IRQ_HANDLED;
+}
+
+static struct timer_of to = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
+
+ .clkevt = {
+ .name = TIMER_NAME,
+ .rating = 300,
+ .features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_shutdown = sprd_timer_shutdown,
+ .set_state_periodic = sprd_timer_set_periodic,
+ .set_next_event = sprd_timer_set_next_event,
+ .cpumask = cpu_possible_mask,
+ },
+
+ .of_irq = {
+ .handler = sprd_timer_interrupt,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ },
+};
+
+static int __init sprd_timer_init(struct device_node *np)
+{
+ int ret;
+
+ ret = timer_of_init(np, &to);
+ if (ret)
+ return ret;
+
+ sprd_timer_enable_interrupt(timer_of_base(&to));
+ clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
+ 1, UINT_MAX);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(sc9860_timer, "sprd,sc9860-timer", sprd_timer_init);
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index 8f24237..e5cdc3a 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
+#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/of.h>
@@ -16,175 +17,318 @@
#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/reset.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+#include "timer-of.h"
#define TIM_CR1 0x00
#define TIM_DIER 0x0c
#define TIM_SR 0x10
#define TIM_EGR 0x14
+#define TIM_CNT 0x24
#define TIM_PSC 0x28
#define TIM_ARR 0x2c
+#define TIM_CCR1 0x34
#define TIM_CR1_CEN BIT(0)
+#define TIM_CR1_UDIS BIT(1)
#define TIM_CR1_OPM BIT(3)
#define TIM_CR1_ARPE BIT(7)
#define TIM_DIER_UIE BIT(0)
+#define TIM_DIER_CC1IE BIT(1)
#define TIM_SR_UIF BIT(0)
#define TIM_EGR_UG BIT(0)
-struct stm32_clock_event_ddata {
- struct clock_event_device evtdev;
- unsigned periodic_top;
- void __iomem *base;
+#define TIM_PSC_MAX USHRT_MAX
+#define TIM_PSC_CLKRATE 10000
+
+struct stm32_timer_private {
+ int bits;
};
-static int stm32_clock_event_shutdown(struct clock_event_device *evtdev)
+/**
+ * stm32_timer_of_bits_set - set accessor helper
+ * @to: a timer_of structure pointer
+ * @bits: the number of bits (16 or 32)
+ *
+ * Accessor helper to set the number of bits in the timer-of private
+ * structure.
+ *
+ */
+static void stm32_timer_of_bits_set(struct timer_of *to, int bits)
{
- struct stm32_clock_event_ddata *data =
- container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
- void *base = data->base;
+ struct stm32_timer_private *pd = to->private_data;
- writel_relaxed(0, base + TIM_CR1);
- return 0;
+ pd->bits = bits;
+}
+
+/**
+ * stm32_timer_of_bits_get - get accessor helper
+ * @to: a timer_of structure pointer
+ *
+ * Accessor helper to get the number of bits in the timer-of private
+ * structure.
+ *
+ * Returns an integer corresponding to the number of bits.
+ */
+static int stm32_timer_of_bits_get(struct timer_of *to)
+{
+ struct stm32_timer_private *pd = to->private_data;
+
+ return pd->bits;
+}
+
+static void __iomem *stm32_timer_cnt __read_mostly;
+
+static u64 notrace stm32_read_sched_clock(void)
+{
+ return readl_relaxed(stm32_timer_cnt);
+}
+
+static struct delay_timer stm32_timer_delay;
+
+static unsigned long stm32_read_delay(void)
+{
+ return readl_relaxed(stm32_timer_cnt);
}
-static int stm32_clock_event_set_periodic(struct clock_event_device *evtdev)
+static void stm32_clock_event_disable(struct timer_of *to)
{
- struct stm32_clock_event_ddata *data =
- container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
- void *base = data->base;
+ writel_relaxed(0, timer_of_base(to) + TIM_DIER);
+}
+
+/**
+ * stm32_timer_start - Start the counter without event
+ * @to: a timer_of structure pointer
+ *
+ * Start the timer so that the counter resets and starts incrementing,
+ * while the update event on counter overflow is disabled (TIM_CR1_UDIS).
+ * By default, the counter counts up.
+ */
+static void stm32_timer_start(struct timer_of *to)
+{
+ writel_relaxed(TIM_CR1_UDIS | TIM_CR1_CEN, timer_of_base(to) + TIM_CR1);
+}
+
+static int stm32_clock_event_shutdown(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ stm32_clock_event_disable(to);
- writel_relaxed(data->periodic_top, base + TIM_ARR);
- writel_relaxed(TIM_CR1_ARPE | TIM_CR1_CEN, base + TIM_CR1);
return 0;
}
static int stm32_clock_event_set_next_event(unsigned long evt,
- struct clock_event_device *evtdev)
+ struct clock_event_device *clkevt)
{
- struct stm32_clock_event_ddata *data =
- container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
+ struct timer_of *to = to_timer_of(clkevt);
+ unsigned long now, next;
+
+ next = readl_relaxed(timer_of_base(to) + TIM_CNT) + evt;
+ writel_relaxed(next, timer_of_base(to) + TIM_CCR1);
+ now = readl_relaxed(timer_of_base(to) + TIM_CNT);
+
+ if ((next - now) > evt)
+ return -ETIME;
- writel_relaxed(evt, data->base + TIM_ARR);
- writel_relaxed(TIM_CR1_ARPE | TIM_CR1_OPM | TIM_CR1_CEN,
- data->base + TIM_CR1);
+ writel_relaxed(TIM_DIER_CC1IE, timer_of_base(to) + TIM_DIER);
+
+ return 0;
+}
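The (next - now) > evt test relies on unsigned wrap-around: if the counter passed the programmed compare value before TIM_CCR1 took effect, next - now underflows to a huge value, the function returns -ETIME and the clockevents core retries with a larger delta. A standalone illustration in plain C, with hypothetical values:

	#include <stdbool.h>
	#include <stdint.h>

	/* Wrap-safe "did the counter already pass the compare value?" test. */
	static bool missed_event(uint32_t now, uint32_t next, uint32_t evt)
	{
		return next - now > evt;	/* wraps huge when now > next */
	}

	/* missed_event(1005, 1000, 100): 1000 - 1005 wraps to 4294967291 > 100 */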
+
+static int stm32_clock_event_set_periodic(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ stm32_timer_start(to);
+
+ return stm32_clock_event_set_next_event(timer_of_period(to), clkevt);
+}
+
+static int stm32_clock_event_set_oneshot(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ stm32_timer_start(to);
return 0;
}
static irqreturn_t stm32_clock_event_handler(int irq, void *dev_id)
{
- struct stm32_clock_event_ddata *data = dev_id;
+ struct clock_event_device *clkevt = (struct clock_event_device *)dev_id;
+ struct timer_of *to = to_timer_of(clkevt);
+
+ writel_relaxed(0, timer_of_base(to) + TIM_SR);
- writel_relaxed(0, data->base + TIM_SR);
+ if (clockevent_state_periodic(clkevt))
+ stm32_clock_event_set_periodic(clkevt);
+ else
+ stm32_clock_event_shutdown(clkevt);
- data->evtdev.event_handler(&data->evtdev);
+ clkevt->event_handler(clkevt);
return IRQ_HANDLED;
}
-static struct stm32_clock_event_ddata clock_event_ddata = {
- .evtdev = {
- .name = "stm32 clockevent",
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
- .set_state_shutdown = stm32_clock_event_shutdown,
- .set_state_periodic = stm32_clock_event_set_periodic,
- .set_state_oneshot = stm32_clock_event_shutdown,
- .tick_resume = stm32_clock_event_shutdown,
- .set_next_event = stm32_clock_event_set_next_event,
- .rating = 200,
- },
-};
+/**
+ * stm32_timer_set_width - Sort out the timer width (32/16)
+ * @to: a pointer to a timer-of structure
+ *
+ * Write the 32-bit max value to the ARR register and read it back. If
+ * the timer is 32 bits wide, the result will be UINT_MAX, otherwise it
+ * will be truncated to USHRT_MAX by the 16-bit register. Store the
+ * detected width in the private data.
+ *
+ */
+static void __init stm32_timer_set_width(struct timer_of *to)
+{
+ u32 width;
+
+ writel_relaxed(UINT_MAX, timer_of_base(to) + TIM_ARR);
+
+ width = readl_relaxed(timer_of_base(to) + TIM_ARR);
+
+ stm32_timer_of_bits_set(to, width == UINT_MAX ? 32 : 16);
+}
-static int __init stm32_clockevent_init(struct device_node *np)
+/**
+ * stm32_timer_set_prescaler - Compute and set the prescaler register
+ * @to: a pointer to a timer-of structure
+ *
+ * Depending on the timer width, compute the prescaler so that 16-bit
+ * timers always target a 10 kHz rate (TIM_PSC_CLKRATE). 32-bit timers
+ * are considered precise and long enough not to need the prescaler.
+ */
+static void __init stm32_timer_set_prescaler(struct timer_of *to)
{
- struct stm32_clock_event_ddata *data = &clock_event_ddata;
- struct clk *clk;
- struct reset_control *rstc;
- unsigned long rate, max_delta;
- int irq, ret, bits, prescaler = 1;
-
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- pr_err("failed to get clock for clockevent (%d)\n", ret);
- goto err_clk_get;
+ int prescaler = 1;
+
+ if (stm32_timer_of_bits_get(to) != 32) {
+ prescaler = DIV_ROUND_CLOSEST(timer_of_rate(to),
+ TIM_PSC_CLKRATE);
+ /*
+ * The prescaler register is a u16; the value can't be
+ * greater than TIM_PSC_MAX, so cap it in that case.
+ */
+ prescaler = prescaler < TIM_PSC_MAX ? prescaler : TIM_PSC_MAX;
}
- ret = clk_prepare_enable(clk);
- if (ret) {
- pr_err("failed to enable timer clock for clockevent (%d)\n",
- ret);
- goto err_clk_enable;
- }
+ writel_relaxed(prescaler - 1, timer_of_base(to) + TIM_PSC);
+ writel_relaxed(TIM_EGR_UG, timer_of_base(to) + TIM_EGR);
+ writel_relaxed(0, timer_of_base(to) + TIM_SR);
- rate = clk_get_rate(clk);
+ /* Adjust rate and period given the prescaler value */
+ to->of_clk.rate = DIV_ROUND_CLOSEST(to->of_clk.rate, prescaler);
+ to->of_clk.period = DIV_ROUND_UP(to->of_clk.rate, HZ);
+}
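A worked example of the prescaler arithmetic, assuming a hypothetical 90 MHz timer clock, a 16-bit timer and HZ = 100:

	/* prescaler = DIV_ROUND_CLOSEST(90000000, 10000) = 9000 (< TIM_PSC_MAX) */
	/* rate      = DIV_ROUND_CLOSEST(90000000, 9000)  = 10000 Hz             */
	/* period    = DIV_ROUND_UP(10000, 100)           = 100 ticks per jiffy  */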
- rstc = of_reset_control_get(np, NULL);
- if (!IS_ERR(rstc)) {
- reset_control_assert(rstc);
- reset_control_deassert(rstc);
+static int __init stm32_clocksource_init(struct timer_of *to)
+{
+ u32 bits = stm32_timer_of_bits_get(to);
+ const char *name = to->np->full_name;
+
+ /*
+ * This driver allows to register several timers and relies on
+ * the generic time framework to select the right one.
+ * However, nothing allows us to do the same for the
+ * sched_clock. We are not interested in a sched_clock for the
+ * 16-bit timers but only for the 32-bit one, so if no 32-bit
+ * timer is registered yet, we select this 32-bit timer as a
+ * sched_clock.
+ */
+ if (bits == 32 && !stm32_timer_cnt) {
+
+ /*
+ * Start the counter immediately, as we will be using
+ * it right away.
+ */
+ stm32_timer_start(to);
+
+ stm32_timer_cnt = timer_of_base(to) + TIM_CNT;
+ sched_clock_register(stm32_read_sched_clock, bits, timer_of_rate(to));
+ pr_info("%s: STM32 sched_clock registered\n", name);
+
+ stm32_timer_delay.read_current_timer = stm32_read_delay;
+ stm32_timer_delay.freq = timer_of_rate(to);
+ register_current_timer_delay(&stm32_timer_delay);
+ pr_info("%s: STM32 delay timer registered\n", name);
}
- data->base = of_iomap(np, 0);
- if (!data->base) {
- ret = -ENXIO;
- pr_err("failed to map registers for clockevent\n");
- goto err_iomap;
- }
+ return clocksource_mmio_init(timer_of_base(to) + TIM_CNT, name,
+ timer_of_rate(to), bits == 32 ? 250 : 100,
+ bits, clocksource_mmio_readl_up);
+}
- irq = irq_of_parse_and_map(np, 0);
- if (!irq) {
- ret = -EINVAL;
- pr_err("%pOF: failed to get irq.\n", np);
- goto err_get_irq;
- }
+static void __init stm32_clockevent_init(struct timer_of *to)
+{
+ u32 bits = stm32_timer_of_bits_get(to);
- /* Detect whether the timer is 16 or 32 bits */
- writel_relaxed(~0U, data->base + TIM_ARR);
- max_delta = readl_relaxed(data->base + TIM_ARR);
- if (max_delta == ~0U) {
- prescaler = 1;
- bits = 32;
- } else {
- prescaler = 1024;
- bits = 16;
- }
- writel_relaxed(0, data->base + TIM_ARR);
+ to->clkevt.name = to->np->full_name;
+ to->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ to->clkevt.set_state_shutdown = stm32_clock_event_shutdown;
+ to->clkevt.set_state_periodic = stm32_clock_event_set_periodic;
+ to->clkevt.set_state_oneshot = stm32_clock_event_set_oneshot;
+ to->clkevt.tick_resume = stm32_clock_event_shutdown;
+ to->clkevt.set_next_event = stm32_clock_event_set_next_event;
+ to->clkevt.rating = bits == 32 ? 250 : 100;
+
+ clockevents_config_and_register(&to->clkevt, timer_of_rate(to), 0x1,
+ (1 << bits) - 1);
+
+ pr_info("%pOF: STM32 clockevent driver initialized (%d bits)\n",
+ to->np, bits);
+}
+
+static int __init stm32_timer_init(struct device_node *node)
+{
+ struct reset_control *rstc;
+ struct timer_of *to;
+ int ret;
+
+ to = kzalloc(sizeof(*to), GFP_KERNEL);
+ if (!to)
+ return -ENOMEM;
- writel_relaxed(prescaler - 1, data->base + TIM_PSC);
- writel_relaxed(TIM_EGR_UG, data->base + TIM_EGR);
- writel_relaxed(TIM_DIER_UIE, data->base + TIM_DIER);
- writel_relaxed(0, data->base + TIM_SR);
+ to->flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE;
+ to->of_irq.handler = stm32_clock_event_handler;
- data->periodic_top = DIV_ROUND_CLOSEST(rate, prescaler * HZ);
+ ret = timer_of_init(node, to);
+ if (ret)
+ goto err;
- clockevents_config_and_register(&data->evtdev,
- DIV_ROUND_CLOSEST(rate, prescaler),
- 0x1, max_delta);
+ to->private_data = kzalloc(sizeof(struct stm32_timer_private),
+ GFP_KERNEL);
+ if (!to->private_data) {
+ ret = -ENOMEM;
+ goto deinit;
+ }
- ret = request_irq(irq, stm32_clock_event_handler, IRQF_TIMER,
- "stm32 clockevent", data);
- if (ret) {
- pr_err("%pOF: failed to request irq.\n", np);
- goto err_get_irq;
+ rstc = of_reset_control_get(node, NULL);
+ if (!IS_ERR(rstc)) {
+ reset_control_assert(rstc);
+ reset_control_deassert(rstc);
}
- pr_info("%pOF: STM32 clockevent driver initialized (%d bits)\n",
- np, bits);
+ stm32_timer_set_width(to);
- return ret;
+ stm32_timer_set_prescaler(to);
+
+ ret = stm32_clocksource_init(to);
+ if (ret)
+ goto deinit;
+
+ stm32_clockevent_init(to);
+ return 0;
-err_get_irq:
- iounmap(data->base);
-err_iomap:
- clk_disable_unprepare(clk);
-err_clk_enable:
- clk_put(clk);
-err_clk_get:
+deinit:
+ timer_of_cleanup(to);
+err:
+ kfree(to);
return ret;
}
-TIMER_OF_DECLARE(stm32, "st,stm32-timer", stm32_clockevent_init);
+TIMER_OF_DECLARE(stm32, "st,stm32-timer", stm32_timer_init);
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index bdce448..3a88e33 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -2,6 +2,29 @@
# ARM CPU Frequency scaling drivers
#
+config ACPI_CPPC_CPUFREQ
+ tristate "CPUFreq driver based on the ACPI CPPC spec"
+ depends on ACPI_PROCESSOR
+ select ACPI_CPPC_LIB
+ help
+ This adds a CPUFreq driver which uses CPPC methods
+ as described in the ACPIv5.1 spec. CPPC stands for
+ Collaborative Processor Performance Controls. It
+ is based on an abstract continuous scale of CPU
+ performance values which allows the remote power
+ processor to flexibly optimize for power and
+ performance. CPPC relies on power management firmware
+ support for its operation.
+
+ If in doubt, say N.
+
+config ARM_ARMADA_37XX_CPUFREQ
+ tristate "Armada 37xx CPUFreq support"
+ depends on ARCH_MVEBU
+ help
+ This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
+ The Armada 37xx PMU supports 4 frequency and VDD levels.
+
# big LITTLE core layer and glue drivers
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
@@ -12,6 +35,30 @@ config ARM_BIG_LITTLE_CPUFREQ
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
+config ARM_DT_BL_CPUFREQ
+ tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && OF
+ help
+ This enables probing via DT for Generic CPUfreq driver for ARM
+ big.LITTLE platform. This gets frequency tables from DT.
+
+config ARM_SCPI_CPUFREQ
+ tristate "SCPI based CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
+ help
+ This adds the CPUfreq driver support for ARM big.LITTLE platforms
+ using SCPI protocol for CPU power management.
+
+ This driver uses SCPI Message Protocol driver to interact with the
+ firmware providing the CPU DVFS functionality.
+
+config ARM_VEXPRESS_SPC_CPUFREQ
+ tristate "Versatile Express SPC based CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
+ help
+ This adds the CPUfreq driver support for Versatile Express
+ big.LITTLE platforms using SPC for power management.
+
config ARM_BRCMSTB_AVS_CPUFREQ
tristate "Broadcom STB AVS CPUfreq driver"
depends on ARCH_BRCMSTB || COMPILE_TEST
@@ -33,20 +80,6 @@ config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
If in doubt, say N.
-config ARM_DT_BL_CPUFREQ
- tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && OF
- help
- This enables probing via DT for Generic CPUfreq driver for ARM
- big.LITTLE platform. This gets frequency tables from DT.
-
-config ARM_VEXPRESS_SPC_CPUFREQ
- tristate "Versatile Express SPC based CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
- help
- This add the CPUfreq driver support for Versatile Express
- big.LITTLE platforms using SPC for power management.
-
config ARM_EXYNOS5440_CPUFREQ
tristate "SAMSUNG EXYNOS5440"
depends on SOC_EXYNOS5440
@@ -205,16 +238,6 @@ config ARM_SA1100_CPUFREQ
config ARM_SA1110_CPUFREQ
bool
-config ARM_SCPI_CPUFREQ
- tristate "SCPI based CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
- help
- This adds the CPUfreq driver support for ARM big.LITTLE platforms
- using SCPI protocol for CPU power management.
-
- This driver uses SCPI Message Protocol driver to interact with the
- firmware providing the CPU DVFS functionality.
-
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
depends on PLAT_SPEAR
@@ -275,20 +298,3 @@ config ARM_PXA2xx_CPUFREQ
 This adds the CPUFreq driver support for Intel PXA2xx SoCs.
If in doubt, say N.
-
-config ACPI_CPPC_CPUFREQ
- tristate "CPUFreq driver based on the ACPI CPPC spec"
- depends on ACPI_PROCESSOR
- select ACPI_CPPC_LIB
- default n
- help
- This adds a CPUFreq driver which uses CPPC methods
- as described in the ACPIv5.1 spec. CPPC stands for
- Collaborative Processor Performance Controls. It
- is based on an abstract continuous scale of CPU
- performance values which allows the remote power
- processor to flexibly optimize for power and
- performance. CPPC relies on power management firmware
- support for its operation.
-
- If in doubt, say N.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 812f9e0..e07715c 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -52,23 +52,26 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
# LITTLE drivers, so that it is probed last.
obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
+obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
+obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ) += mediatek-cpufreq.o
+obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
-obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
-obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o
obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
obj-$(CONFIG_ARM_S3C2440_CPUFREQ) += s3c2440-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o
obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
@@ -81,8 +84,6 @@ obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o
obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-cpufreq.o
obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
-obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
-obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
##################################################################################
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 65ec5f0..c56b57d 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -526,34 +526,13 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
static void bL_cpufreq_ready(struct cpufreq_policy *policy)
{
- struct device *cpu_dev = get_cpu_device(policy->cpu);
int cur_cluster = cpu_to_cluster(policy->cpu);
- struct device_node *np;
/* Do not register a cpu_cooling device if we are in IKS mode */
if (cur_cluster >= MAX_CLUSTERS)
return;
- np = of_node_get(cpu_dev->of_node);
- if (WARN_ON(!np))
- return;
-
- if (of_find_property(np, "#cooling-cells", NULL)) {
- u32 power_coefficient = 0;
-
- of_property_read_u32(np, "dynamic-power-coefficient",
- &power_coefficient);
-
- cdev[cur_cluster] = of_cpufreq_power_cooling_register(np,
- policy, power_coefficient, NULL);
- if (IS_ERR(cdev[cur_cluster])) {
- dev_err(cpu_dev,
- "running cpufreq without cooling device: %ld\n",
- PTR_ERR(cdev[cur_cluster]));
- cdev[cur_cluster] = NULL;
- }
- }
- of_node_put(np);
+ cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
}
static struct cpufreq_driver bL_cpufreq_driver = {
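With the thermal core now reading #cooling-cells and dynamic-power-coefficient on its own, every ready callback in this series collapses to the same one-liner. A minimal sketch of what a driver is left with (assuming a cdev field in its private data; illustrative, not a specific driver):

	static void my_cpufreq_ready(struct cpufreq_policy *policy)
	{
		struct my_priv *priv = policy->driver_data;

		/* Returns an ERR_PTR on failure; the old per-driver
		 * error handling now lives inside the thermal core.
		 */
		priv->cdev = of_cpufreq_cooling_register(policy);
	}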
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
new file mode 100644
index 0000000..c6ebc88
--- /dev/null
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CPU frequency scaling support for Armada 37xx platform.
+ *
+ * Copyright (C) 2017 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* Power management in North Bridge register set */
+#define ARMADA_37XX_NB_L0L1 0x18
+#define ARMADA_37XX_NB_L2L3 0x1C
+#define ARMADA_37XX_NB_TBG_DIV_OFF 13
+#define ARMADA_37XX_NB_TBG_DIV_MASK 0x7
+#define ARMADA_37XX_NB_CLK_SEL_OFF 11
+#define ARMADA_37XX_NB_CLK_SEL_MASK 0x1
+#define ARMADA_37XX_NB_CLK_SEL_TBG 0x1
+#define ARMADA_37XX_NB_TBG_SEL_OFF 9
+#define ARMADA_37XX_NB_TBG_SEL_MASK 0x3
+#define ARMADA_37XX_NB_VDD_SEL_OFF 6
+#define ARMADA_37XX_NB_VDD_SEL_MASK 0x3
+#define ARMADA_37XX_NB_CONFIG_SHIFT 16
+#define ARMADA_37XX_NB_DYN_MOD 0x24
+#define ARMADA_37XX_NB_CLK_SEL_EN BIT(26)
+#define ARMADA_37XX_NB_TBG_EN BIT(28)
+#define ARMADA_37XX_NB_DIV_EN BIT(29)
+#define ARMADA_37XX_NB_VDD_EN BIT(30)
+#define ARMADA_37XX_NB_DFS_EN BIT(31)
+#define ARMADA_37XX_NB_CPU_LOAD 0x30
+#define ARMADA_37XX_NB_CPU_LOAD_MASK 0x3
+#define ARMADA_37XX_DVFS_LOAD_0 0
+#define ARMADA_37XX_DVFS_LOAD_1 1
+#define ARMADA_37XX_DVFS_LOAD_2 2
+#define ARMADA_37XX_DVFS_LOAD_3 3
+
+/*
+ * On Armada 37xx the power management unit manages 4 levels of CPU
+ * load; each level can be associated with a CPU clock source, a CPU
+ * divider, a VDD level, etc...
+ */
+#define LOAD_LEVEL_NR 4
+
+struct armada_37xx_dvfs {
+ u32 cpu_freq_max;
+ u8 divider[LOAD_LEVEL_NR];
+};
+
+static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
+ {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
+ {.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
+ {.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
+ {.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
+};
+
+static struct armada_37xx_dvfs *armada_37xx_cpu_freq_info_get(u32 freq)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(armada_37xx_dvfs); i++) {
+ if (freq == armada_37xx_dvfs[i].cpu_freq_max)
+ return &armada_37xx_dvfs[i];
+ }
+
+ pr_err("Unsupported CPU frequency %d MHz\n", freq/1000000);
+ return NULL;
+}
+
+/*
+ * Set up the four levels managed by the hardware. Once the four
+ * levels are configured, the DVFS can be enabled.
+ */
+static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
+ struct clk *clk, u8 *divider)
+{
+ int load_lvl;
+ struct clk *parent;
+
+ for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) {
+ unsigned int reg, mask, val, offset = 0;
+
+ if (load_lvl <= ARMADA_37XX_DVFS_LOAD_1)
+ reg = ARMADA_37XX_NB_L0L1;
+ else
+ reg = ARMADA_37XX_NB_L2L3;
+
+ if (load_lvl == ARMADA_37XX_DVFS_LOAD_0 ||
+ load_lvl == ARMADA_37XX_DVFS_LOAD_2)
+ offset += ARMADA_37XX_NB_CONFIG_SHIFT;
+
+		/* Set the CPU clock source; for all levels we use TBG */
+ val = ARMADA_37XX_NB_CLK_SEL_TBG << ARMADA_37XX_NB_CLK_SEL_OFF;
+ mask = (ARMADA_37XX_NB_CLK_SEL_MASK
+ << ARMADA_37XX_NB_CLK_SEL_OFF);
+
+ /*
+		 * Set the CPU divider based on the pre-computed array in
+		 * order to have balanced steps.
+ */
+ val |= divider[load_lvl] << ARMADA_37XX_NB_TBG_DIV_OFF;
+ mask |= (ARMADA_37XX_NB_TBG_DIV_MASK
+ << ARMADA_37XX_NB_TBG_DIV_OFF);
+
+ /* Set VDD divider which is actually the load level. */
+ val |= load_lvl << ARMADA_37XX_NB_VDD_SEL_OFF;
+ mask |= (ARMADA_37XX_NB_VDD_SEL_MASK
+ << ARMADA_37XX_NB_VDD_SEL_OFF);
+
+ val <<= offset;
+ mask <<= offset;
+
+ regmap_update_bits(base, reg, mask, val);
+ }
+
+ /*
+	 * Set the CPU clock source; for all levels we keep the same
+	 * clock source as the one already configured. For this we need
+	 * to use the clock framework.
+ */
+ parent = clk_get_parent(clk);
+ clk_set_parent(clk, parent);
+}
+
+static void __init armada37xx_cpufreq_disable_dvfs(struct regmap *base)
+{
+ unsigned int reg = ARMADA_37XX_NB_DYN_MOD,
+ mask = ARMADA_37XX_NB_DFS_EN;
+
+ regmap_update_bits(base, reg, mask, 0);
+}
+
+static void __init armada37xx_cpufreq_enable_dvfs(struct regmap *base)
+{
+ unsigned int val, reg = ARMADA_37XX_NB_CPU_LOAD,
+ mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
+
+ /* Start with the highest load (0) */
+ val = ARMADA_37XX_DVFS_LOAD_0;
+ regmap_update_bits(base, reg, mask, val);
+
+ /* Now enable DVFS for the CPUs */
+ reg = ARMADA_37XX_NB_DYN_MOD;
+ mask = ARMADA_37XX_NB_CLK_SEL_EN | ARMADA_37XX_NB_TBG_EN |
+ ARMADA_37XX_NB_DIV_EN | ARMADA_37XX_NB_VDD_EN |
+ ARMADA_37XX_NB_DFS_EN;
+
+ regmap_update_bits(base, reg, mask, mask);
+}
+
+static int __init armada37xx_cpufreq_driver_init(void)
+{
+ struct armada_37xx_dvfs *dvfs;
+ struct platform_device *pdev;
+ unsigned int cur_frequency;
+ struct regmap *nb_pm_base;
+ struct device *cpu_dev;
+ int load_lvl, ret;
+ struct clk *clk;
+
+ nb_pm_base =
+ syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
+
+ if (IS_ERR(nb_pm_base))
+ return -ENODEV;
+
+	/* Before doing any configuration of the DVFS, first disable it */
+ armada37xx_cpufreq_disable_dvfs(nb_pm_base);
+
+ /*
+	 * On CPU 0, register the supported operating points (the
+	 * nominal CPU frequency and full integer divisions of it).
+ */
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+		pr_err("Cannot get CPU\n");
+ return -ENODEV;
+ }
+
+ clk = clk_get(cpu_dev, 0);
+ if (IS_ERR(clk)) {
+ dev_err(cpu_dev, "Cannot get clock for CPU0\n");
+ return PTR_ERR(clk);
+ }
+
+ /* Get nominal (current) CPU frequency */
+ cur_frequency = clk_get_rate(clk);
+ if (!cur_frequency) {
+ dev_err(cpu_dev, "Failed to get clock rate for CPU\n");
+ return -EINVAL;
+ }
+
+ dvfs = armada_37xx_cpu_freq_info_get(cur_frequency);
+ if (!dvfs)
+ return -EINVAL;
+
+ armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
+
+ for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
+ load_lvl++) {
+ unsigned long freq = cur_frequency / dvfs->divider[load_lvl];
+
+ ret = dev_pm_opp_add(cpu_dev, freq, 0);
+ if (ret) {
+			/* Clean up the already-added OPPs before leaving */
+ while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
+ freq = cur_frequency / dvfs->divider[load_lvl];
+ dev_pm_opp_remove(cpu_dev, freq);
+ }
+ return ret;
+ }
+ }
+
+ /* Now that everything is setup, enable the DVFS at hardware level */
+ armada37xx_cpufreq_enable_dvfs(nb_pm_base);
+
+ pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+
+ return PTR_ERR_OR_ZERO(pdev);
+}
+/* late_initcall, to guarantee the driver is loaded after the A37xx clock driver */
+late_initcall(armada37xx_cpufreq_driver_init);
+
+MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
+MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
+MODULE_LICENSE("GPL");
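Each 32-bit load-level register holds two levels, with levels 0 and 2 living in the upper half selected by ARMADA_37XX_NB_CONFIG_SHIFT. A standalone sketch of the same val/mask packing, using local copies of the constants above (illustrative only, not driver code):

	#include <stdint.h>

	#define NB_TBG_DIV_OFF	13
	#define NB_CLK_SEL_OFF	11
	#define NB_CLK_SEL_TBG	0x1
	#define NB_VDD_SEL_OFF	6
	#define NB_CONFIG_SHIFT	16

	/* Mirror of the per-level packing in
	 * armada37xx_cpufreq_dvfs_setup(): clock source, TBG divider
	 * and VDD level, shifted into the upper half for levels 0 and 2.
	 */
	static void pack_level(unsigned int load_lvl, uint8_t divider,
			       uint32_t *val, uint32_t *mask)
	{
		unsigned int offset = (load_lvl % 2) ? 0 : NB_CONFIG_SHIFT;

		*val = NB_CLK_SEL_TBG << NB_CLK_SEL_OFF;
		*mask = 0x1 << NB_CLK_SEL_OFF;

		*val |= (uint32_t)divider << NB_TBG_DIV_OFF;
		*mask |= 0x7 << NB_TBG_DIV_OFF;

		*val |= load_lvl << NB_VDD_SEL_OFF;
		*mask |= 0x3 << NB_VDD_SEL_OFF;

		*val <<= offset;
		*mask <<= offset;
	}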
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index ecc56e2..3b585e4 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -108,6 +108,14 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "marvell,armadaxp", },
+ { .compatible = "mediatek,mt2701", },
+ { .compatible = "mediatek,mt2712", },
+ { .compatible = "mediatek,mt7622", },
+ { .compatible = "mediatek,mt7623", },
+ { .compatible = "mediatek,mt817x", },
+ { .compatible = "mediatek,mt8173", },
+ { .compatible = "mediatek,mt8176", },
+
{ .compatible = "nvidia,tegra124", },
{ .compatible = "st,stih407", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 545946a..de3d104 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -319,33 +319,8 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
static void cpufreq_ready(struct cpufreq_policy *policy)
{
struct private_data *priv = policy->driver_data;
- struct device_node *np = of_node_get(priv->cpu_dev->of_node);
- if (WARN_ON(!np))
- return;
-
- /*
- * For now, just loading the cooling device;
- * thermal DT code takes care of matching them.
- */
- if (of_find_property(np, "#cooling-cells", NULL)) {
- u32 power_coefficient = 0;
-
- of_property_read_u32(np, "dynamic-power-coefficient",
- &power_coefficient);
-
- priv->cdev = of_cpufreq_power_cooling_register(np,
- policy, power_coefficient, NULL);
- if (IS_ERR(priv->cdev)) {
- dev_err(priv->cpu_dev,
- "running cpufreq without cooling device: %ld\n",
- PTR_ERR(priv->cdev));
-
- priv->cdev = NULL;
- }
- }
-
- of_node_put(np);
+ priv->cdev = of_cpufreq_cooling_register(policy);
}
static struct cpufreq_driver dt_cpufreq_driver = {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 41d148a..421f318 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -601,19 +601,18 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
/**
* cpufreq_parse_governor - parse a governor string
*/
-static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
- struct cpufreq_governor **governor)
+static int cpufreq_parse_governor(char *str_governor,
+ struct cpufreq_policy *policy)
{
- int err = -EINVAL;
-
if (cpufreq_driver->setpolicy) {
if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
- *policy = CPUFREQ_POLICY_PERFORMANCE;
- err = 0;
- } else if (!strncasecmp(str_governor, "powersave",
- CPUFREQ_NAME_LEN)) {
- *policy = CPUFREQ_POLICY_POWERSAVE;
- err = 0;
+ policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+ return 0;
+ }
+
+ if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ return 0;
}
} else {
struct cpufreq_governor *t;
@@ -621,26 +620,31 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
mutex_lock(&cpufreq_governor_mutex);
t = find_governor(str_governor);
-
- if (t == NULL) {
+ if (!t) {
int ret;
mutex_unlock(&cpufreq_governor_mutex);
+
ret = request_module("cpufreq_%s", str_governor);
- mutex_lock(&cpufreq_governor_mutex);
+ if (ret)
+ return -EINVAL;
- if (ret == 0)
- t = find_governor(str_governor);
- }
+ mutex_lock(&cpufreq_governor_mutex);
- if (t != NULL) {
- *governor = t;
- err = 0;
+ t = find_governor(str_governor);
}
+ if (t && !try_module_get(t->owner))
+ t = NULL;
mutex_unlock(&cpufreq_governor_mutex);
+
+ if (t) {
+ policy->governor = t;
+ return 0;
+ }
}
- return err;
+
+ return -EINVAL;
}
/**
@@ -760,11 +764,14 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
if (ret != 1)
return -EINVAL;
- if (cpufreq_parse_governor(str_governor, &new_policy.policy,
- &new_policy.governor))
+ if (cpufreq_parse_governor(str_governor, &new_policy))
return -EINVAL;
ret = cpufreq_set_policy(policy, &new_policy);
+
+ if (new_policy.governor)
+ module_put(new_policy.governor->owner);
+
return ret ? ret : count;
}
@@ -1044,8 +1051,7 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
if (policy->last_policy)
new_policy.policy = policy->last_policy;
else
- cpufreq_parse_governor(gov->name, &new_policy.policy,
- NULL);
+ cpufreq_parse_governor(gov->name, &new_policy);
}
/* set default policy */
return cpufreq_set_policy(policy, &new_policy);
@@ -2160,7 +2166,6 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
mutex_lock(&cpufreq_governor_mutex);
list_del(&governor->governor_list);
mutex_unlock(&cpufreq_governor_mutex);
- return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
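cpufreq_parse_governor() now takes a reference on the governor's owning module under the same mutex used for the lookup, and store_scaling_governor() drops it once cpufreq_set_policy() is done. The essential get/put pairing, condensed into a sketch (find_governor() is the file-local lookup shown above):

	static struct cpufreq_governor *get_governor(const char *name)
	{
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);
		t = find_governor(name);
		/* Pin the owning module; fails if it is unloading */
		if (t && !try_module_get(t->owner))
			t = NULL;
		mutex_unlock(&cpufreq_governor_mutex);

		return t;	/* caller ends with module_put(t->owner) */
	}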
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 58d4f4e..ca38229 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -22,6 +22,8 @@
#include "cpufreq_governor.h"
+#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL (2 * TICK_NSEC / NSEC_PER_USEC)
+
static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);
static DEFINE_MUTEX(gov_dbs_data_mutex);
@@ -47,11 +49,15 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct policy_dbs_info *policy_dbs;
+ unsigned int sampling_interval;
int ret;
- ret = sscanf(buf, "%u", &dbs_data->sampling_rate);
- if (ret != 1)
+
+ ret = sscanf(buf, "%u", &sampling_interval);
+ if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
return -EINVAL;
+ dbs_data->sampling_rate = sampling_interval;
+
/*
* We are operating under dbs_data->mutex and so the list and its
* entries can't be freed concurrently.
@@ -430,7 +436,14 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
if (ret)
goto free_policy_dbs_info;
- dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy);
+ /*
+ * The sampling interval should not be less than the transition latency
+ * of the CPU and it also cannot be too small for dbs_update() to work
+ * correctly.
+ */
+ dbs_data->sampling_rate = max_t(unsigned int,
+ CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
+ cpufreq_policy_transition_delay_us(policy));
if (!have_governor_per_policy())
gov->gdbs_data = dbs_data;
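The new floor is simply two scheduler ticks expressed in microseconds, since dbs_update() relies on per-tick idle statistics. Worked out for common HZ choices (illustrative arithmetic only):

	/* CPUFREQ_DBS_MIN_SAMPLING_INTERVAL = 2 * TICK_NSEC / NSEC_PER_USEC
	 *
	 * HZ = 100:  TICK_NSEC = 10,000,000 ns -> floor = 20,000 us
	 * HZ = 250:  TICK_NSEC =  4,000,000 ns -> floor =  8,000 us
	 * HZ = 1000: TICK_NSEC =  1,000,000 ns -> floor =  2,000 us
	 */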
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 1e55b57..1572129 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -27,7 +27,7 @@ struct cpufreq_stats {
unsigned int *trans_table;
};
-static int cpufreq_stats_update(struct cpufreq_stats *stats)
+static void cpufreq_stats_update(struct cpufreq_stats *stats)
{
unsigned long long cur_time = get_jiffies_64();
@@ -35,7 +35,6 @@ static int cpufreq_stats_update(struct cpufreq_stats *stats)
stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
stats->last_time = cur_time;
spin_unlock(&cpufreq_stats_lock);
- return 0;
}
static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 628fe89..741f22e 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -25,15 +25,29 @@ static struct regulator *arm_reg;
static struct regulator *pu_reg;
static struct regulator *soc_reg;
-static struct clk *arm_clk;
-static struct clk *pll1_sys_clk;
-static struct clk *pll1_sw_clk;
-static struct clk *step_clk;
-static struct clk *pll2_pfd2_396m_clk;
-
-/* clk used by i.MX6UL */
-static struct clk *pll2_bus_clk;
-static struct clk *secondary_sel_clk;
+enum IMX6_CPUFREQ_CLKS {
+ ARM,
+ PLL1_SYS,
+ STEP,
+ PLL1_SW,
+ PLL2_PFD2_396M,
+ /* MX6UL requires two more clks */
+ PLL2_BUS,
+ SECONDARY_SEL,
+};
+#define IMX6Q_CPUFREQ_CLK_NUM 5
+#define IMX6UL_CPUFREQ_CLK_NUM 7
+
+static int num_clks;
+static struct clk_bulk_data clks[] = {
+ { .id = "arm" },
+ { .id = "pll1_sys" },
+ { .id = "step" },
+ { .id = "pll1_sw" },
+ { .id = "pll2_pfd2_396m" },
+ { .id = "pll2_bus" },
+ { .id = "secondary_sel" },
+};
static struct device *cpu_dev;
static bool free_opp;
@@ -53,7 +67,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
new_freq = freq_table[index].frequency;
freq_hz = new_freq * 1000;
- old_freq = clk_get_rate(arm_clk) / 1000;
+ old_freq = clk_get_rate(clks[ARM].clk) / 1000;
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
if (IS_ERR(opp)) {
@@ -112,29 +126,35 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
* voltage of 528MHz, so lower the CPU frequency to one
* half before changing CPU frequency.
*/
- clk_set_rate(arm_clk, (old_freq >> 1) * 1000);
- clk_set_parent(pll1_sw_clk, pll1_sys_clk);
- if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk))
- clk_set_parent(secondary_sel_clk, pll2_bus_clk);
+ clk_set_rate(clks[ARM].clk, (old_freq >> 1) * 1000);
+ clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
+ if (freq_hz > clk_get_rate(clks[PLL2_PFD2_396M].clk))
+ clk_set_parent(clks[SECONDARY_SEL].clk,
+ clks[PLL2_BUS].clk);
else
- clk_set_parent(secondary_sel_clk, pll2_pfd2_396m_clk);
- clk_set_parent(step_clk, secondary_sel_clk);
- clk_set_parent(pll1_sw_clk, step_clk);
+ clk_set_parent(clks[SECONDARY_SEL].clk,
+ clks[PLL2_PFD2_396M].clk);
+ clk_set_parent(clks[STEP].clk, clks[SECONDARY_SEL].clk);
+ clk_set_parent(clks[PLL1_SW].clk, clks[STEP].clk);
+ if (freq_hz > clk_get_rate(clks[PLL2_BUS].clk)) {
+ clk_set_rate(clks[PLL1_SYS].clk, new_freq * 1000);
+ clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
+ }
} else {
- clk_set_parent(step_clk, pll2_pfd2_396m_clk);
- clk_set_parent(pll1_sw_clk, step_clk);
- if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
- clk_set_rate(pll1_sys_clk, new_freq * 1000);
- clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+ clk_set_parent(clks[STEP].clk, clks[PLL2_PFD2_396M].clk);
+ clk_set_parent(clks[PLL1_SW].clk, clks[STEP].clk);
+ if (freq_hz > clk_get_rate(clks[PLL2_PFD2_396M].clk)) {
+ clk_set_rate(clks[PLL1_SYS].clk, new_freq * 1000);
+ clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
} else {
/* pll1_sys needs to be enabled for divider rate change to work. */
pll1_sys_temp_enabled = true;
- clk_prepare_enable(pll1_sys_clk);
+ clk_prepare_enable(clks[PLL1_SYS].clk);
}
}
/* Ensure the arm clock divider is what we expect */
- ret = clk_set_rate(arm_clk, new_freq * 1000);
+ ret = clk_set_rate(clks[ARM].clk, new_freq * 1000);
if (ret) {
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
regulator_set_voltage_tol(arm_reg, volt_old, 0);
@@ -143,7 +163,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
/* PLL1 is only needed until after ARM-PODF is set. */
if (pll1_sys_temp_enabled)
- clk_disable_unprepare(pll1_sys_clk);
+ clk_disable_unprepare(clks[PLL1_SYS].clk);
/* scaling down? scale voltage after frequency */
if (new_freq < old_freq) {
@@ -174,7 +194,7 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
- policy->clk = arm_clk;
+ policy->clk = clks[ARM].clk;
ret = cpufreq_generic_init(policy, freq_table, transition_latency);
policy->suspend_freq = policy->max;
@@ -226,23 +246,61 @@ static void imx6q_opp_check_speed_grading(struct device *dev)
val >>= OCOTP_CFG3_SPEED_SHIFT;
val &= 0x3;
- if ((val != OCOTP_CFG3_SPEED_1P2GHZ) &&
- of_machine_is_compatible("fsl,imx6q"))
- if (dev_pm_opp_disable(dev, 1200000000))
- dev_warn(dev, "failed to disable 1.2GHz OPP\n");
if (val < OCOTP_CFG3_SPEED_996MHZ)
if (dev_pm_opp_disable(dev, 996000000))
dev_warn(dev, "failed to disable 996MHz OPP\n");
- if (of_machine_is_compatible("fsl,imx6q")) {
+
+ if (of_machine_is_compatible("fsl,imx6q") ||
+ of_machine_is_compatible("fsl,imx6qp")) {
if (val != OCOTP_CFG3_SPEED_852MHZ)
if (dev_pm_opp_disable(dev, 852000000))
dev_warn(dev, "failed to disable 852MHz OPP\n");
+ if (val != OCOTP_CFG3_SPEED_1P2GHZ)
+ if (dev_pm_opp_disable(dev, 1200000000))
+ dev_warn(dev, "failed to disable 1.2GHz OPP\n");
}
iounmap(base);
put_node:
of_node_put(np);
}
+#define OCOTP_CFG3_6UL_SPEED_696MHZ 0x2
+
+static void imx6ul_opp_check_speed_grading(struct device *dev)
+{
+ struct device_node *np;
+ void __iomem *base;
+ u32 val;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
+ if (!np)
+ return;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ dev_err(dev, "failed to map ocotp\n");
+ goto put_node;
+ }
+
+ /*
+ * Speed GRADING[1:0] defines the max speed of ARM:
+ * 2b'00: Reserved;
+ * 2b'01: 528000000Hz;
+ * 2b'10: 696000000Hz;
+ * 2b'11: Reserved;
+	 * We need to set the max speed of ARM according to the fuse map.
+ */
+ val = readl_relaxed(base + OCOTP_CFG3);
+ val >>= OCOTP_CFG3_SPEED_SHIFT;
+ val &= 0x3;
+ if (val != OCOTP_CFG3_6UL_SPEED_696MHZ)
+ if (dev_pm_opp_disable(dev, 696000000))
+ dev_warn(dev, "failed to disable 696MHz OPP\n");
+ iounmap(base);
+put_node:
+ of_node_put(np);
+}
+
static int imx6q_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
@@ -265,28 +323,15 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
return -ENOENT;
}
- arm_clk = clk_get(cpu_dev, "arm");
- pll1_sys_clk = clk_get(cpu_dev, "pll1_sys");
- pll1_sw_clk = clk_get(cpu_dev, "pll1_sw");
- step_clk = clk_get(cpu_dev, "step");
- pll2_pfd2_396m_clk = clk_get(cpu_dev, "pll2_pfd2_396m");
- if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
- IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
- dev_err(cpu_dev, "failed to get clocks\n");
- ret = -ENOENT;
- goto put_clk;
- }
-
if (of_machine_is_compatible("fsl,imx6ul") ||
- of_machine_is_compatible("fsl,imx6ull")) {
- pll2_bus_clk = clk_get(cpu_dev, "pll2_bus");
- secondary_sel_clk = clk_get(cpu_dev, "secondary_sel");
- if (IS_ERR(pll2_bus_clk) || IS_ERR(secondary_sel_clk)) {
- dev_err(cpu_dev, "failed to get clocks specific to imx6ul\n");
- ret = -ENOENT;
- goto put_clk;
- }
- }
+ of_machine_is_compatible("fsl,imx6ull"))
+ num_clks = IMX6UL_CPUFREQ_CLK_NUM;
+ else
+ num_clks = IMX6Q_CPUFREQ_CLK_NUM;
+
+ ret = clk_bulk_get(cpu_dev, num_clks, clks);
+ if (ret)
+ goto put_node;
arm_reg = regulator_get(cpu_dev, "arm");
pu_reg = regulator_get_optional(cpu_dev, "pu");
@@ -310,7 +355,10 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
goto put_reg;
}
- imx6q_opp_check_speed_grading(cpu_dev);
+ if (of_machine_is_compatible("fsl,imx6ul"))
+ imx6ul_opp_check_speed_grading(cpu_dev);
+ else
+ imx6q_opp_check_speed_grading(cpu_dev);
/* Because we have added the OPPs here, we must free them */
free_opp = true;
@@ -423,22 +471,11 @@ put_reg:
regulator_put(pu_reg);
if (!IS_ERR(soc_reg))
regulator_put(soc_reg);
-put_clk:
- if (!IS_ERR(arm_clk))
- clk_put(arm_clk);
- if (!IS_ERR(pll1_sys_clk))
- clk_put(pll1_sys_clk);
- if (!IS_ERR(pll1_sw_clk))
- clk_put(pll1_sw_clk);
- if (!IS_ERR(step_clk))
- clk_put(step_clk);
- if (!IS_ERR(pll2_pfd2_396m_clk))
- clk_put(pll2_pfd2_396m_clk);
- if (!IS_ERR(pll2_bus_clk))
- clk_put(pll2_bus_clk);
- if (!IS_ERR(secondary_sel_clk))
- clk_put(secondary_sel_clk);
+
+ clk_bulk_put(num_clks, clks);
+put_node:
of_node_put(np);
+
return ret;
}
@@ -452,13 +489,8 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
if (!IS_ERR(pu_reg))
regulator_put(pu_reg);
regulator_put(soc_reg);
- clk_put(arm_clk);
- clk_put(pll1_sys_clk);
- clk_put(pll1_sw_clk);
- clk_put(step_clk);
- clk_put(pll2_pfd2_396m_clk);
- clk_put(pll2_bus_clk);
- clk_put(secondary_sel_clk);
+
+ clk_bulk_put(num_clks, clks);
return 0;
}
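clk_bulk_get() acquires the whole array or fails without holding anything, which is why the old seven-branch error path collapses to one clk_bulk_put(). The API shape in a minimal sketch (names hypothetical, error handling trimmed):

	#include <linux/clk.h>

	static struct clk_bulk_data my_clks[] = {
		{ .id = "arm" },
		{ .id = "pll1_sys" },
	};

	static int my_probe(struct device *dev)
	{
		/* On failure nothing is held, so there is no partial
		 * cleanup to do; on success clks[i].clk is populated.
		 */
		int ret = clk_bulk_get(dev, ARRAY_SIZE(my_clks), my_clks);

		if (ret)
			return ret;

		clk_set_rate(my_clks[0].clk, 996000000);
		clk_bulk_put(ARRAY_SIZE(my_clks), my_clks);
		return 0;
	}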
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 93a0e88..7edf7a0 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1595,15 +1595,6 @@ static const struct pstate_funcs knl_funcs = {
.get_val = core_get_val,
};
-static const struct pstate_funcs bxt_funcs = {
- .get_max = core_get_max_pstate,
- .get_max_physical = core_get_max_pstate_physical,
- .get_min = core_get_min_pstate,
- .get_turbo = core_get_turbo_pstate,
- .get_scaling = core_get_scaling,
- .get_val = core_get_val,
-};
-
#define ICPU(model, policy) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
(unsigned long)&policy }
@@ -1627,8 +1618,9 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs),
ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs),
ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs),
- ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_funcs),
- ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, bxt_funcs),
+ ICPU(INTEL_FAM6_ATOM_GOLDMONT, core_funcs),
+ ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, core_funcs),
+ ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index c46a12d..5faa37c 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -894,7 +894,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0))
longhaul_setup_voltagescaling();
- policy->cpuinfo.transition_latency = 200000; /* nsec */
+ policy->transition_delay_us = 200000; /* usec */
return cpufreq_table_validate_and_show(policy, longhaul_table);
}
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index e0d5090..8c04ddd 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -310,28 +310,8 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
{
struct mtk_cpu_dvfs_info *info = policy->driver_data;
- struct device_node *np = of_node_get(info->cpu_dev->of_node);
- u32 capacitance = 0;
- if (WARN_ON(!np))
- return;
-
- if (of_find_property(np, "#cooling-cells", NULL)) {
- of_property_read_u32(np, DYNAMIC_POWER, &capacitance);
-
- info->cdev = of_cpufreq_power_cooling_register(np,
- policy, capacitance, NULL);
-
- if (IS_ERR(info->cdev)) {
- dev_err(info->cpu_dev,
- "running cpufreq without cooling device: %ld\n",
- PTR_ERR(info->cdev));
-
- info->cdev = NULL;
- }
- }
-
- of_node_put(np);
+ info->cdev = of_cpufreq_cooling_register(policy);
}
static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
@@ -574,6 +554,7 @@ static struct platform_driver mtk_cpufreq_platdrv = {
/* List of machines supported by this driver */
static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
{ .compatible = "mediatek,mt2701", },
+ { .compatible = "mediatek,mt2712", },
{ .compatible = "mediatek,mt7622", },
{ .compatible = "mediatek,mt7623", },
{ .compatible = "mediatek,mt817x", },
diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c
index ed915ee..31513bd 100644
--- a/drivers/cpufreq/mvebu-cpufreq.c
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -76,12 +76,6 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
return PTR_ERR(clk);
}
- /*
- * In case of a failure of dev_pm_opp_add(), we don't
- * bother with cleaning up the registered OPP (there's
- * no function to do so), and simply cancel the
- * registration of the cpufreq device.
- */
ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
if (ret) {
clk_put(clk);
@@ -91,7 +85,8 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
if (ret) {
clk_put(clk);
- return ret;
+ dev_err(cpu_dev, "Failed to register OPPs\n");
+ goto opp_register_failed;
}
ret = dev_pm_opp_set_sharing_cpus(cpu_dev,
@@ -99,9 +94,16 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
if (ret)
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
+ clk_put(clk);
}
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
return 0;
+
+opp_register_failed:
+	/* As registration has failed, remove all OPPs for all CPUs */
+ dev_pm_opp_cpumask_remove_table(cpu_possible_mask);
+
+ return ret;
}
device_initcall(armada_xp_pmsu_cpufreq_init);
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index b6d7c4c..29cdec1 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -29,6 +29,7 @@
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/cpu.h>
+#include <linux/hashtable.h>
#include <trace/events/power.h>
#include <asm/cputhreads.h>
@@ -38,14 +39,13 @@
#include <asm/opal.h>
#include <linux/timer.h>
-#define POWERNV_MAX_PSTATES 256
+#define POWERNV_MAX_PSTATES_ORDER 8
+#define POWERNV_MAX_PSTATES (1UL << (POWERNV_MAX_PSTATES_ORDER))
#define PMSR_PSAFE_ENABLE (1UL << 30)
#define PMSR_SPR_EM_DISABLE (1UL << 31)
-#define PMSR_MAX(x) ((x >> 32) & 0xFF)
+#define MAX_PSTATE_SHIFT 32
#define LPSTATE_SHIFT 48
#define GPSTATE_SHIFT 56
-#define GET_LPSTATE(x) (((x) >> LPSTATE_SHIFT) & 0xFF)
-#define GET_GPSTATE(x) (((x) >> GPSTATE_SHIFT) & 0xFF)
#define MAX_RAMP_DOWN_TIME 5120
/*
@@ -94,6 +94,27 @@ struct global_pstate_info {
};
static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
+
+DEFINE_HASHTABLE(pstate_revmap, POWERNV_MAX_PSTATES_ORDER);
+/**
+ * struct pstate_idx_revmap_data: Entry in the hashmap pstate_revmap
+ * indexed by a function of pstate id.
+ *
+ * @pstate_id: pstate id for this entry.
+ *
+ * @cpufreq_table_idx: Index into the powernv_freqs
+ * cpufreq_frequency_table for frequency
+ * corresponding to pstate_id.
+ *
+ * @hentry: hlist_node that hooks this entry into the pstate_revmap
+ * hashtable
+ */
+struct pstate_idx_revmap_data {
+ u8 pstate_id;
+ unsigned int cpufreq_table_idx;
+ struct hlist_node hentry;
+};
+
static bool rebooting, throttled, occ_reset;
static const char * const throttle_reason[] = {
@@ -148,39 +169,56 @@ static struct powernv_pstate_info {
bool wof_enabled;
} powernv_pstate_info;
-/* Use following macros for conversions between pstate_id and index */
-static inline int idx_to_pstate(unsigned int i)
+static inline u8 extract_pstate(u64 pmsr_val, unsigned int shift)
+{
+ return ((pmsr_val >> shift) & 0xFF);
+}
+
+#define extract_local_pstate(x) extract_pstate(x, LPSTATE_SHIFT)
+#define extract_global_pstate(x) extract_pstate(x, GPSTATE_SHIFT)
+#define extract_max_pstate(x) extract_pstate(x, MAX_PSTATE_SHIFT)
+
+/* Use following functions for conversions between pstate_id and index */
+
+/**
+ * idx_to_pstate : Returns the pstate id corresponding to the
+ * frequency in the cpufreq frequency table
+ * powernv_freqs indexed by @i.
+ *
+ *		    If @i is out of bounds, this will return the pstate
+ * corresponding to the nominal frequency.
+ */
+static inline u8 idx_to_pstate(unsigned int i)
{
if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
- pr_warn_once("index %u is out of bound\n", i);
+ pr_warn_once("idx_to_pstate: index %u is out of bound\n", i);
return powernv_freqs[powernv_pstate_info.nominal].driver_data;
}
return powernv_freqs[i].driver_data;
}
-static inline unsigned int pstate_to_idx(int pstate)
+/**
+ * pstate_to_idx : Returns the index in the cpufreq frequency table
+ * powernv_freqs for the frequency whose corresponding
+ * pstate id is @pstate.
+ *
+ * If no frequency corresponding to @pstate is found,
+ * this will return the index of the nominal
+ * frequency.
+ */
+static unsigned int pstate_to_idx(u8 pstate)
{
- int min = powernv_freqs[powernv_pstate_info.min].driver_data;
- int max = powernv_freqs[powernv_pstate_info.max].driver_data;
+ unsigned int key = pstate % POWERNV_MAX_PSTATES;
+ struct pstate_idx_revmap_data *revmap_data;
- if (min > 0) {
- if (unlikely((pstate < max) || (pstate > min))) {
- pr_warn_once("pstate %d is out of bound\n", pstate);
- return powernv_pstate_info.nominal;
- }
- } else {
- if (unlikely((pstate > max) || (pstate < min))) {
- pr_warn_once("pstate %d is out of bound\n", pstate);
- return powernv_pstate_info.nominal;
- }
+ hash_for_each_possible(pstate_revmap, revmap_data, hentry, key) {
+ if (revmap_data->pstate_id == pstate)
+ return revmap_data->cpufreq_table_idx;
}
- /*
- * abs() is deliberately used so that is works with
- * both monotonically increasing and decreasing
- * pstate values
- */
- return abs(pstate - idx_to_pstate(powernv_pstate_info.max));
+
+ pr_warn_once("pstate_to_idx: pstate 0x%x not found\n", pstate);
+ return powernv_pstate_info.nominal;
}
static inline void reset_gpstates(struct cpufreq_policy *policy)
@@ -247,7 +285,7 @@ static int init_powernv_pstates(void)
powernv_pstate_info.wof_enabled = true;
next:
- pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min,
+ pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min,
pstate_nominal, pstate_max);
pr_info("Workload Optimized Frequency is %s in the platform\n",
(powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");
@@ -278,19 +316,30 @@ next:
powernv_pstate_info.nr_pstates = nr_pstates;
pr_debug("NR PStates %d\n", nr_pstates);
+
for (i = 0; i < nr_pstates; i++) {
u32 id = be32_to_cpu(pstate_ids[i]);
u32 freq = be32_to_cpu(pstate_freqs[i]);
+ struct pstate_idx_revmap_data *revmap_data;
+ unsigned int key;
pr_debug("PState id %d freq %d MHz\n", id, freq);
powernv_freqs[i].frequency = freq * 1000; /* kHz */
- powernv_freqs[i].driver_data = id;
+ powernv_freqs[i].driver_data = id & 0xFF;
+
+		revmap_data = kmalloc(sizeof(*revmap_data), GFP_KERNEL);
+		if (!revmap_data)
+			return -ENOMEM;
+
+ revmap_data->pstate_id = id & 0xFF;
+ revmap_data->cpufreq_table_idx = i;
+ key = (revmap_data->pstate_id) % POWERNV_MAX_PSTATES;
+ hash_add(pstate_revmap, &revmap_data->hentry, key);
if (id == pstate_max)
powernv_pstate_info.max = i;
- else if (id == pstate_nominal)
+ if (id == pstate_nominal)
powernv_pstate_info.nominal = i;
- else if (id == pstate_min)
+ if (id == pstate_min)
powernv_pstate_info.min = i;
if (powernv_pstate_info.wof_enabled && id == pstate_turbo) {
@@ -307,14 +356,13 @@ next:
}
/* Returns the CPU frequency corresponding to the pstate_id. */
-static unsigned int pstate_id_to_freq(int pstate_id)
+static unsigned int pstate_id_to_freq(u8 pstate_id)
{
int i;
i = pstate_to_idx(pstate_id);
if (i >= powernv_pstate_info.nr_pstates || i < 0) {
- pr_warn("PState id %d outside of PState table, "
- "reporting nominal id %d instead\n",
+ pr_warn("PState id 0x%x outside of PState table, reporting nominal id 0x%x instead\n",
pstate_id, idx_to_pstate(powernv_pstate_info.nominal));
i = powernv_pstate_info.nominal;
}
@@ -420,8 +468,8 @@ static inline void set_pmspr(unsigned long sprn, unsigned long val)
*/
struct powernv_smp_call_data {
unsigned int freq;
- int pstate_id;
- int gpstate_id;
+ u8 pstate_id;
+ u8 gpstate_id;
};
/*
@@ -438,22 +486,15 @@ struct powernv_smp_call_data {
static void powernv_read_cpu_freq(void *arg)
{
unsigned long pmspr_val;
- s8 local_pstate_id;
struct powernv_smp_call_data *freq_data = arg;
pmspr_val = get_pmspr(SPRN_PMSR);
-
- /*
- * The local pstate id corresponds bits 48..55 in the PMSR.
- * Note: Watch out for the sign!
- */
- local_pstate_id = (pmspr_val >> 48) & 0xFF;
- freq_data->pstate_id = local_pstate_id;
+ freq_data->pstate_id = extract_local_pstate(pmspr_val);
freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);
- pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n",
- raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
- freq_data->freq);
+ pr_debug("cpu %d pmsr %016lX pstate_id 0x%x frequency %d kHz\n",
+ raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
+ freq_data->freq);
}
/*
@@ -515,21 +556,21 @@ static void powernv_cpufreq_throttle_check(void *data)
struct chip *chip;
unsigned int cpu = smp_processor_id();
unsigned long pmsr;
- int pmsr_pmax;
+ u8 pmsr_pmax;
unsigned int pmsr_pmax_idx;
pmsr = get_pmspr(SPRN_PMSR);
chip = this_cpu_read(chip_info);
/* Check for Pmax Capping */
- pmsr_pmax = (s8)PMSR_MAX(pmsr);
+ pmsr_pmax = extract_max_pstate(pmsr);
pmsr_pmax_idx = pstate_to_idx(pmsr_pmax);
if (pmsr_pmax_idx != powernv_pstate_info.max) {
if (chip->throttled)
goto next;
chip->throttled = true;
if (pmsr_pmax_idx > powernv_pstate_info.nominal) {
- pr_warn_once("CPU %d on Chip %u has Pmax(%d) reduced below nominal frequency(%d)\n",
+ pr_warn_once("CPU %d on Chip %u has Pmax(0x%x) reduced below that of nominal frequency(0x%x)\n",
cpu, chip->id, pmsr_pmax,
idx_to_pstate(powernv_pstate_info.nominal));
chip->throttle_sub_turbo++;
@@ -645,8 +686,8 @@ void gpstate_timer_handler(struct timer_list *t)
* value. Hence, read from PMCR to get correct data.
*/
val = get_pmspr(SPRN_PMCR);
- freq_data.gpstate_id = (s8)GET_GPSTATE(val);
- freq_data.pstate_id = (s8)GET_LPSTATE(val);
+ freq_data.gpstate_id = extract_global_pstate(val);
+ freq_data.pstate_id = extract_local_pstate(val);
if (freq_data.gpstate_id == freq_data.pstate_id) {
reset_gpstates(policy);
spin_unlock(&gpstates->gpstate_lock);
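The reverse map drops the old assumption that pstate ids form one contiguous, monotonic range: lookup is now a bucket walk keyed on the low bits of the id. A condensed sketch of the insert/lookup pair with the kernel hashtable API (names shortened for illustration):

	#include <linux/hashtable.h>

	static DEFINE_HASHTABLE(revmap, 8);	/* 2^8 buckets */

	struct revmap_entry {
		u8 pstate_id;
		unsigned int table_idx;
		struct hlist_node hentry;
	};

	static void revmap_add(struct revmap_entry *e)
	{
		hash_add(revmap, &e->hentry, e->pstate_id % 256);
	}

	static int revmap_find(u8 pstate)
	{
		struct revmap_entry *e;

		/* Only the bucket the key hashes to is walked */
		hash_for_each_possible(revmap, e, hentry, pstate % 256) {
			if (e->pstate_id == pstate)
				return e->table_idx;
		}
		return -1;	/* caller falls back to nominal */
	}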
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 4ada55b..0562761a 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -275,20 +275,8 @@ static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
static void qoriq_cpufreq_ready(struct cpufreq_policy *policy)
{
struct cpu_data *cpud = policy->driver_data;
- struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
- if (of_find_property(np, "#cooling-cells", NULL)) {
- cpud->cdev = of_cpufreq_cooling_register(np, policy);
-
- if (IS_ERR(cpud->cdev) && PTR_ERR(cpud->cdev) != -ENOSYS) {
- pr_err("cpu%d is not running as cooling device: %ld\n",
- policy->cpu, PTR_ERR(cpud->cdev));
-
- cpud->cdev = NULL;
- }
- }
-
- of_node_put(np);
+ cpud->cdev = of_cpufreq_cooling_register(policy);
}
static struct cpufreq_driver qoriq_cpufreq_driver = {
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 05d2990..247fcbf 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -18,27 +18,89 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_cooling.h>
+#include <linux/export.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/scpi_protocol.h>
+#include <linux/slab.h>
#include <linux/types.h>
-#include "arm_big_little.h"
+struct scpi_data {
+ struct clk *clk;
+ struct device *cpu_dev;
+ struct thermal_cooling_device *cdev;
+};
static struct scpi_ops *scpi_ops;
-static int scpi_get_transition_latency(struct device *cpu_dev)
+static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+ struct scpi_data *priv = policy->driver_data;
+ unsigned long rate = clk_get_rate(priv->clk);
+
+ return rate / 1000;
+}
+
+static int
+scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ struct scpi_data *priv = policy->driver_data;
+ u64 rate = policy->freq_table[index].frequency * 1000;
+ int ret;
+
+ ret = clk_set_rate(priv->clk, rate);
+ if (!ret && (clk_get_rate(priv->clk) != rate))
+ ret = -EIO;
+
+ return ret;
+}
+
+static int
+scpi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
- return scpi_ops->get_transition_latency(cpu_dev);
+ int cpu, domain, tdomain;
+ struct device *tcpu_dev;
+
+ domain = scpi_ops->device_domain_id(cpu_dev);
+ if (domain < 0)
+ return domain;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == cpu_dev->id)
+ continue;
+
+ tcpu_dev = get_cpu_device(cpu);
+ if (!tcpu_dev)
+ continue;
+
+ tdomain = scpi_ops->device_domain_id(tcpu_dev);
+ if (tdomain == domain)
+ cpumask_set_cpu(cpu, cpumask);
+ }
+
+ return 0;
}
-static int scpi_init_opp_table(const struct cpumask *cpumask)
+static int scpi_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
- struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
+ unsigned int latency;
+ struct device *cpu_dev;
+ struct scpi_data *priv;
+ struct cpufreq_frequency_table *freq_table;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu%d device\n", policy->cpu);
+ return -ENODEV;
+ }
ret = scpi_ops->add_opps_to_device(cpu_dev);
if (ret) {
@@ -46,32 +108,133 @@ static int scpi_init_opp_table(const struct cpumask *cpumask)
return ret;
}
- ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpumask);
- if (ret)
+ ret = scpi_get_sharing_cpus(cpu_dev, policy->cpus);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to get sharing cpumask\n");
+ return ret;
+ }
+
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
+ if (ret) {
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
+ return ret;
+ }
+
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
+ ret = -EPROBE_DEFER;
+ goto out_free_opp;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_free_opp;
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+ goto out_free_priv;
+ }
+
+ priv->cpu_dev = cpu_dev;
+ priv->clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d\n",
+ __func__, cpu_dev->id);
+		ret = PTR_ERR(priv->clk);
+		goto out_free_cpufreq_table;
+ }
+
+ policy->driver_data = priv;
+
+ ret = cpufreq_table_validate_and_show(policy, freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
+ ret);
+ goto out_put_clk;
+ }
+
+	/* SCPI allows DVFS requests for any domain from any CPU */
+ policy->dvfs_possible_from_any_cpu = true;
+
+ latency = scpi_ops->get_transition_latency(cpu_dev);
+ if (!latency)
+ latency = CPUFREQ_ETERNAL;
+
+ policy->cpuinfo.transition_latency = latency;
+
+ policy->fast_switch_possible = false;
+ return 0;
+
+out_put_clk:
+ clk_put(priv->clk);
+out_free_cpufreq_table:
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_free_priv:
+ kfree(priv);
+out_free_opp:
+ dev_pm_opp_cpumask_remove_table(policy->cpus);
+
return ret;
}
-static const struct cpufreq_arm_bL_ops scpi_cpufreq_ops = {
- .name = "scpi",
- .get_transition_latency = scpi_get_transition_latency,
- .init_opp_table = scpi_init_opp_table,
- .free_opp_table = dev_pm_opp_cpumask_remove_table,
+static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct scpi_data *priv = policy->driver_data;
+
+ cpufreq_cooling_unregister(priv->cdev);
+ clk_put(priv->clk);
+ dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
+ kfree(priv);
+ dev_pm_opp_cpumask_remove_table(policy->related_cpus);
+
+ return 0;
+}
+
+static void scpi_cpufreq_ready(struct cpufreq_policy *policy)
+{
+ struct scpi_data *priv = policy->driver_data;
+ struct thermal_cooling_device *cdev;
+
+ cdev = of_cpufreq_cooling_register(policy);
+ if (!IS_ERR(cdev))
+ priv->cdev = cdev;
+}
+
+static struct cpufreq_driver scpi_cpufreq_driver = {
+ .name = "scpi-cpufreq",
+ .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .attr = cpufreq_generic_attr,
+ .get = scpi_cpufreq_get_rate,
+ .init = scpi_cpufreq_init,
+ .exit = scpi_cpufreq_exit,
+ .ready = scpi_cpufreq_ready,
+ .target_index = scpi_cpufreq_set_target,
};
static int scpi_cpufreq_probe(struct platform_device *pdev)
{
+ int ret;
+
scpi_ops = get_scpi_ops();
if (!scpi_ops)
return -EIO;
- return bL_cpufreq_register(&scpi_cpufreq_ops);
+ ret = cpufreq_register_driver(&scpi_cpufreq_driver);
+ if (ret)
+ dev_err(&pdev->dev, "%s: registering cpufreq failed, err: %d\n",
+ __func__, ret);
+ return ret;
}
static int scpi_cpufreq_remove(struct platform_device *pdev)
{
- bL_cpufreq_unregister(&scpi_cpufreq_ops);
+ cpufreq_unregister_driver(&scpi_cpufreq_driver);
scpi_ops = NULL;
return 0;
}
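The new target_index callback treats a clk_set_rate() that returns success but does not land on the requested frequency as -EIO, since the SCPI firmware may silently clamp a request. That verify-after-set pattern in isolation (a sketch):

	static int set_and_verify_rate(struct clk *clk, unsigned long rate)
	{
		int ret = clk_set_rate(clk, rate);

		/* Firmware accepted the call but settled elsewhere */
		if (!ret && clk_get_rate(clk) != rate)
			ret = -EIO;

		return ret;
	}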
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 923317f..a099b7bf 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -17,6 +17,7 @@
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -50,6 +51,7 @@ struct ti_cpufreq_soc_data {
unsigned long efuse_mask;
unsigned long efuse_shift;
unsigned long rev_offset;
+ bool multi_regulator;
};
struct ti_cpufreq_data {
@@ -57,6 +59,7 @@ struct ti_cpufreq_data {
struct device_node *opp_node;
struct regmap *syscon;
const struct ti_cpufreq_soc_data *soc_data;
+ struct opp_table *opp_table;
};
static unsigned long amx3_efuse_xlate(struct ti_cpufreq_data *opp_data,
@@ -95,6 +98,7 @@ static struct ti_cpufreq_soc_data am3x_soc_data = {
.efuse_offset = 0x07fc,
.efuse_mask = 0x1fff,
.rev_offset = 0x600,
+ .multi_regulator = false,
};
static struct ti_cpufreq_soc_data am4x_soc_data = {
@@ -103,6 +107,7 @@ static struct ti_cpufreq_soc_data am4x_soc_data = {
.efuse_offset = 0x0610,
.efuse_mask = 0x3f,
.rev_offset = 0x600,
+ .multi_regulator = false,
};
static struct ti_cpufreq_soc_data dra7_soc_data = {
@@ -111,6 +116,7 @@ static struct ti_cpufreq_soc_data dra7_soc_data = {
.efuse_mask = 0xf80000,
.efuse_shift = 19,
.rev_offset = 0x204,
+ .multi_regulator = true,
};
/**
@@ -195,12 +201,14 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
{},
};
-static int ti_cpufreq_init(void)
+static int ti_cpufreq_probe(struct platform_device *pdev)
{
u32 version[VERSION_COUNT];
struct device_node *np;
const struct of_device_id *match;
+ struct opp_table *ti_opp_table;
struct ti_cpufreq_data *opp_data;
+ const char * const reg_names[] = {"vdd", "vbb"};
int ret;
np = of_find_node_by_path("/");
@@ -247,16 +255,29 @@ static int ti_cpufreq_init(void)
if (ret)
goto fail_put_node;
- ret = PTR_ERR_OR_ZERO(dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
- version, VERSION_COUNT));
- if (ret) {
+ ti_opp_table = dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
+ version, VERSION_COUNT);
+ if (IS_ERR(ti_opp_table)) {
dev_err(opp_data->cpu_dev,
"Failed to set supported hardware\n");
+ ret = PTR_ERR(ti_opp_table);
goto fail_put_node;
}
- of_node_put(opp_data->opp_node);
+ opp_data->opp_table = ti_opp_table;
+
+ if (opp_data->soc_data->multi_regulator) {
+ ti_opp_table = dev_pm_opp_set_regulators(opp_data->cpu_dev,
+ reg_names,
+ ARRAY_SIZE(reg_names));
+ if (IS_ERR(ti_opp_table)) {
+ dev_pm_opp_put_supported_hw(opp_data->opp_table);
+ ret = PTR_ERR(ti_opp_table);
+ goto fail_put_node;
+ }
+ }
+ of_node_put(opp_data->opp_node);
register_cpufreq_dt:
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
@@ -269,4 +290,22 @@ free_opp_data:
return ret;
}
-device_initcall(ti_cpufreq_init);
+
+static int ti_cpufreq_init(void)
+{
+ platform_device_register_simple("ti-cpufreq", -1, NULL, 0);
+ return 0;
+}
+module_init(ti_cpufreq_init);
+
+static struct platform_driver ti_cpufreq_driver = {
+ .probe = ti_cpufreq_probe,
+ .driver = {
+ .name = "ti-cpufreq",
+ },
+};
+module_platform_driver(ti_cpufreq_driver);
+
+MODULE_DESCRIPTION("TI CPUFreq/OPP hw-supported driver");
+MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");
+MODULE_LICENSE("GPL v2");
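Moving ti-cpufreq to a platform driver gives the probe path -EPROBE_DEFER semantics, but since no device-tree node describes the driver itself, the module creates its own platform device. A stripped-down sketch of that self-registration pattern (built-in only, names hypothetical):

	#include <linux/init.h>
	#include <linux/platform_device.h>

	static int my_probe(struct platform_device *pdev)
	{
		/* may return -EPROBE_DEFER until dependencies appear */
		return 0;
	}

	static struct platform_driver my_driver = {
		.probe = my_probe,
		.driver = { .name = "my-virtual-dev" },
	};

	static int __init my_init(void)
	{
		platform_driver_register(&my_driver);
		/* no DT node: create the matching device by hand */
		platform_device_register_simple("my-virtual-dev", -1,
						NULL, 0);
		return 0;
	}
	device_initcall(my_init);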
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 4e78263..5d359af 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -36,14 +36,15 @@ static struct cpuidle_governor * __cpuidle_find_governor(const char *str)
/**
* cpuidle_switch_governor - changes the governor
* @gov: the new target governor
- *
- * NOTE: "gov" can be NULL to specify disabled
* Must be called with cpuidle_lock acquired.
*/
int cpuidle_switch_governor(struct cpuidle_governor *gov)
{
struct cpuidle_device *dev;
+ if (!gov)
+ return -EINVAL;
+
if (gov == cpuidle_curr_governor)
return 0;
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 3e104f5..b56b3f7 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -5,6 +5,7 @@ config CRYPTO_DEV_CHELSIO
select CRYPTO_SHA256
select CRYPTO_SHA512
select CRYPTO_AUTHENC
+ select CRYPTO_GF128MUL
---help---
The Chelsio Crypto Co-processor driver for T6 adapters.
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 89ba9e8..4bcef78 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -607,6 +607,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
ndesc = ctx->handle_result(priv, ring, sreq->req,
&should_complete, &ret);
if (ndesc < 0) {
+ kfree(sreq);
dev_err(priv->dev, "failed to handle result (%d)", ndesc);
return;
}
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 5438552..fcc0a60 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -14,6 +14,7 @@
#include <crypto/aes.h>
#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
#include "safexcel.h"
@@ -33,6 +34,10 @@ struct safexcel_cipher_ctx {
unsigned int key_len;
};
+struct safexcel_cipher_req {
+ bool needs_inv;
+};
+
static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
struct crypto_async_request *async,
struct safexcel_command_desc *cdesc,
@@ -126,9 +131,9 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
return 0;
}
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
- struct crypto_async_request *async,
- bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
{
struct skcipher_request *req = skcipher_request_cast(async);
struct safexcel_result_desc *rdesc;
@@ -265,7 +270,6 @@ static int safexcel_aes_send(struct crypto_async_request *async,
spin_unlock_bh(&priv->ring[ring].egress_lock);
request->req = &req->base;
- ctx->base.handle_result = safexcel_handle_result;
*commands = n_cdesc;
*results = n_rdesc;
@@ -341,8 +345,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
ring = safexcel_select_ring(priv);
ctx->base.ring = ring;
- ctx->base.needs_inv = false;
- ctx->base.send = safexcel_aes_send;
spin_lock_bh(&priv->ring[ring].queue_lock);
enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -359,6 +361,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
return ndesc;
}
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
+{
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+ int err;
+
+ if (sreq->needs_inv) {
+ sreq->needs_inv = false;
+ err = safexcel_handle_inv_result(priv, ring, async,
+ should_complete, ret);
+ } else {
+ err = safexcel_handle_req_result(priv, ring, async,
+ should_complete, ret);
+ }
+
+ return err;
+}
+
static int safexcel_cipher_send_inv(struct crypto_async_request *async,
int ring, struct safexcel_request *request,
int *commands, int *results)
@@ -368,8 +390,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- ctx->base.handle_result = safexcel_handle_inv_result;
-
ret = safexcel_invalidate_cache(async, &ctx->base, priv,
ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
@@ -381,28 +401,46 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
return 0;
}
+static int safexcel_send(struct crypto_async_request *async,
+ int ring, struct safexcel_request *request,
+ int *commands, int *results)
+{
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+ int ret;
+
+ if (sreq->needs_inv)
+ ret = safexcel_cipher_send_inv(async, ring, request,
+ commands, results);
+ else
+ ret = safexcel_aes_send(async, ring, request,
+ commands, results);
+ return ret;
+}
+
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
- struct skcipher_request req;
+ SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
- memset(&req, 0, sizeof(struct skcipher_request));
+ memset(req, 0, sizeof(struct skcipher_request));
/* create invalidation request */
init_completion(&result.completion);
- skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- safexcel_inv_complete, &result);
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ safexcel_inv_complete, &result);
- skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
- ctx = crypto_tfm_ctx(req.base.tfm);
+ skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+ ctx = crypto_tfm_ctx(req->base.tfm);
ctx->base.exit_inv = true;
- ctx->base.send = safexcel_cipher_send_inv;
+ sreq->needs_inv = true;
spin_lock_bh(&priv->ring[ring].queue_lock);
- crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+ crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
if (!priv->ring[ring].need_dequeue)
@@ -424,19 +462,21 @@ static int safexcel_aes(struct skcipher_request *req,
enum safexcel_cipher_direction dir, u32 mode)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
struct safexcel_crypto_priv *priv = ctx->priv;
int ret, ring;
+ sreq->needs_inv = false;
ctx->direction = dir;
ctx->mode = mode;
if (ctx->base.ctxr) {
- if (ctx->base.needs_inv)
- ctx->base.send = safexcel_cipher_send_inv;
+ if (ctx->base.needs_inv) {
+ sreq->needs_inv = true;
+ ctx->base.needs_inv = false;
+ }
} else {
ctx->base.ring = safexcel_select_ring(priv);
- ctx->base.send = safexcel_aes_send;
-
ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
EIP197_GFP_FLAGS(req->base),
&ctx->base.ctxr_dma);
@@ -476,6 +516,11 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
alg.skcipher.base);
ctx->priv = tmpl->priv;
+ ctx->base.send = safexcel_send;
+ ctx->base.handle_result = safexcel_handle_result;
+
+ crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+ sizeof(struct safexcel_cipher_req));
return 0;
}
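Dispatch on invalidation used to be done by rewriting ctx->base.send and ctx->base.handle_result per request, which races when several requests for one tfm are in flight; the flag now lives in the per-request context, set once at enqueue time. Distilled into a sketch (the two helpers are hypothetical stand-ins; skcipher_request_ctx() comes from crypto/internal/skcipher.h):

	struct my_req_ctx {
		bool needs_inv;	/* set at enqueue, read at send */
	};

	static int my_send(struct skcipher_request *req)
	{
		struct my_req_ctx *rctx = skcipher_request_ctx(req);

		if (rctx->needs_inv)
			return send_invalidation(req);	/* hypothetical */

		return send_crypto_op(req);		/* hypothetical */
	}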
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 74feb62..0c5a582 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -32,9 +32,10 @@ struct safexcel_ahash_req {
bool last_req;
bool finish;
bool hmac;
+ bool needs_inv;
	u8 state_sz;    /* expected state size, only set once */
- u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
+ u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
u64 len;
u64 processed;
@@ -119,15 +120,15 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
}
}
-static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
- struct crypto_async_request *async,
- bool *should_complete, int *ret)
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
{
struct safexcel_result_desc *rdesc;
struct ahash_request *areq = ahash_request_cast(async);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
- int cache_len, result_sz = sreq->state_sz;
+ int cache_len;
*ret = 0;
@@ -148,8 +149,8 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
spin_unlock_bh(&priv->ring[ring].egress_lock);
if (sreq->finish)
- result_sz = crypto_ahash_digestsize(ahash);
- memcpy(sreq->state, areq->result, result_sz);
+ memcpy(areq->result, sreq->state,
+ crypto_ahash_digestsize(ahash));
dma_unmap_sg(priv->dev, areq->src,
sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
@@ -165,9 +166,9 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
return 1;
}
-static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
- struct safexcel_request *request, int *commands,
- int *results)
+static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
+ struct safexcel_request *request,
+ int *commands, int *results)
{
struct ahash_request *areq = ahash_request_cast(async);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
@@ -273,7 +274,7 @@ send_command:
/* Add the token */
safexcel_hash_token(first_cdesc, len, req->state_sz);
- ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
+ ctx->base.result_dma = dma_map_single(priv->dev, req->state,
req->state_sz, DMA_FROM_DEVICE);
if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
ret = -EINVAL;
@@ -292,7 +293,6 @@ send_command:
req->processed += len;
request->req = &areq->base;
- ctx->base.handle_result = safexcel_handle_result;
*commands = n_cdesc;
*results = 1;
@@ -374,8 +374,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
ring = safexcel_select_ring(priv);
ctx->base.ring = ring;
- ctx->base.needs_inv = false;
- ctx->base.send = safexcel_ahash_send;
spin_lock_bh(&priv->ring[ring].queue_lock);
enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
@@ -392,6 +390,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
return 1;
}
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+ struct crypto_async_request *async,
+ bool *should_complete, int *ret)
+{
+ struct ahash_request *areq = ahash_request_cast(async);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ int err;
+
+ if (req->needs_inv) {
+ req->needs_inv = false;
+ err = safexcel_handle_inv_result(priv, ring, async,
+ should_complete, ret);
+ } else {
+ err = safexcel_handle_req_result(priv, ring, async,
+ should_complete, ret);
+ }
+
+ return err;
+}
+
static int safexcel_ahash_send_inv(struct crypto_async_request *async,
int ring, struct safexcel_request *request,
int *commands, int *results)
@@ -400,7 +418,6 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
int ret;
- ctx->base.handle_result = safexcel_handle_inv_result;
ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
@@ -412,28 +429,46 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
return 0;
}
+static int safexcel_ahash_send(struct crypto_async_request *async,
+ int ring, struct safexcel_request *request,
+ int *commands, int *results)
+{
+ struct ahash_request *areq = ahash_request_cast(async);
+ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ int ret;
+
+ if (req->needs_inv)
+ ret = safexcel_ahash_send_inv(async, ring, request,
+ commands, results);
+ else
+ ret = safexcel_ahash_send_req(async, ring, request,
+ commands, results);
+ return ret;
+}
+
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
- struct ahash_request req;
+ AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
+ struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
- memset(&req, 0, sizeof(struct ahash_request));
+ memset(req, 0, sizeof(struct ahash_request));
/* create invalidation request */
init_completion(&result.completion);
- ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
safexcel_inv_complete, &result);
- ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
- ctx = crypto_tfm_ctx(req.base.tfm);
+ ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
+ ctx = crypto_tfm_ctx(req->base.tfm);
ctx->base.exit_inv = true;
- ctx->base.send = safexcel_ahash_send_inv;
+ rctx->needs_inv = true;
spin_lock_bh(&priv->ring[ring].queue_lock);
- crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+ crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
if (!priv->ring[ring].need_dequeue)
@@ -481,14 +516,16 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
struct safexcel_crypto_priv *priv = ctx->priv;
int ret, ring;
- ctx->base.send = safexcel_ahash_send;
+ req->needs_inv = false;
if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
if (ctx->base.ctxr) {
- if (ctx->base.needs_inv)
- ctx->base.send = safexcel_ahash_send_inv;
+ if (ctx->base.needs_inv) {
+ ctx->base.needs_inv = false;
+ req->needs_inv = true;
+ }
} else {
ctx->base.ring = safexcel_select_ring(priv);
ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
@@ -622,6 +659,8 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
struct safexcel_alg_template, alg.ahash);
ctx->priv = tmpl->priv;
+ ctx->base.send = safexcel_ahash_send;
+ ctx->base.handle_result = safexcel_handle_result;
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct safexcel_ahash_req));
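
A note on the pattern: both safexcel files now record the invalidation decision in the per-request context and funnel everything through a single send()/handle_result() pair registered once at cra_init() time, instead of rewriting function pointers in the transform context that all in-flight requests share. A minimal userspace sketch of that dispatch idiom (names invented for illustration, not the driver's API):

    #include <stdbool.h>
    #include <stdio.h>

    struct request {
        bool needs_inv;    /* recorded per request at enqueue time */
    };

    static int send_invalidation(struct request *req)
    {
        printf("sending context invalidation\n");
        return 0;
    }

    static int send_payload(struct request *req)
    {
        printf("sending regular crypto request\n");
        return 0;
    }

    /* The single hook registered once at init time. */
    static int dispatch_send(struct request *req)
    {
        return req->needs_inv ? send_invalidation(req) : send_payload(req);
    }

    int main(void)
    {
        struct request normal = { .needs_inv = false };
        struct request inv = { .needs_inv = true };

        dispatch_send(&normal);
        dispatch_send(&inv);
        return 0;
    }

Because the flag lives in the request, requests of different kinds can sit in the same ring queue without racing on shared transform state.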
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 48de52c..662e709 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1625,6 +1625,7 @@ static int queue_cache_init(void)
CWQ_ENTRY_SIZE, 0, NULL);
if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+ queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
return -ENOMEM;
}
return 0;
@@ -1634,6 +1635,8 @@ static void queue_cache_destroy(void)
{
kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
+ queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
+ queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
}
static long spu_queue_register_workfn(void *arg)
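
The two n2_core hunks make teardown idempotent: kmem_cache_destroy() accepts NULL as a no-op, so clearing the slots after destroying them lets queue_cache_init() be retried safely, and a second queue_cache_destroy() cannot destroy a cache twice. A userspace analogue with free(), which is NULL-safe in the same way:

    #include <stdlib.h>

    static char *queue_cache[2];

    static void queue_cache_destroy(void)
    {
        free(queue_cache[0]);    /* free(NULL) is a no-op, like kmem_cache_destroy(NULL) */
        free(queue_cache[1]);
        queue_cache[0] = NULL;
        queue_cache[1] = NULL;
    }

    int main(void)
    {
        queue_cache[0] = malloc(32);
        queue_cache[1] = malloc(32);

        queue_cache_destroy();
        queue_cache_destroy();    /* second call is now harmless: no double free */
        return 0;
    }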
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 78fb496..fe2af6a 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -737,7 +737,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev,
devfreq = devfreq_add_device(dev, profile, governor_name, data);
if (IS_ERR(devfreq)) {
devres_free(ptr);
- return ERR_PTR(-ENOMEM);
+ return devfreq;
}
*ptr = devfreq;
@@ -996,7 +996,8 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
if (df->governor == governor) {
ret = 0;
goto out;
- } else if (df->governor->immutable || governor->immutable) {
+ } else if ((df->governor && df->governor->immutable) ||
+ governor->immutable) {
ret = -EINVAL;
goto out;
}
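
Both devfreq hunks are defensive: the first propagates the callee's encoded error instead of collapsing everything into -ENOMEM, and the second checks df->governor for NULL before reading its immutable flag. A sketch of the propagation rule, with IS_ERR()/PTR_ERR() modeled by simplified errno-in-pointer helpers (illustrative, not the kernel's definitions):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *err_ptr(long err) { return (void *)err; }
    static long ptr_err(const void *p) { return (long)p; }
    static int is_err(const void *p) { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }

    static void *add_device(int fail)
    {
        static int real_device;

        return fail ? err_ptr(-EINVAL) : &real_device;
    }

    static void *devm_add_device(int fail)
    {
        void *dev = add_device(fail);

        if (is_err(dev))
            return dev;    /* propagate -EINVAL; don't mint a new -ENOMEM */
        return dev;
    }

    int main(void)
    {
        printf("err = %ld\n", ptr_err(devm_add_device(1)));    /* prints -22 */
        return 0;
    }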
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fbab271..a861b5b 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -708,7 +708,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
unsigned long flags)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
- struct data_chunk *first = xt->sgl;
+ struct data_chunk *first;
struct at_desc *desc = NULL;
size_t xfer_count;
unsigned int dwidth;
@@ -720,6 +720,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
return NULL;
+ first = xt->sgl;
+
dev_info(chan2dev(chan),
"%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
__func__, &xt->src_start, &xt->dst_start, xt->numf,
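
The at_hdmac change fixes a dereference-before-validation bug: the declaration's initializer first = xt->sgl executed before the !xt check. A standalone illustration of the hazard and the fix:

    #include <stdio.h>

    struct xfer {
        int numf;
        int *sgl;
    };

    static int prep(struct xfer *xt)
    {
        int *first;

        if (!xt || xt->numf != 1)    /* validate before touching xt */
            return -1;

        first = xt->sgl;             /* safe only after the check */
        printf("first chunk = %d\n", *first);
        return 0;
    }

    int main(void)
    {
        int chunk = 42;
        struct xfer xt = { .numf = 1, .sgl = &chunk };

        prep(&xt);
        prep(NULL);    /* rejected cleanly instead of crashing */
        return 0;
    }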
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index d50273f..afd5e10 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -555,7 +555,7 @@ static int jz4740_dma_probe(struct platform_device *pdev)
ret = dma_async_device_register(dd);
if (ret)
- return ret;
+ goto err_clk;
irq = platform_get_irq(pdev, 0);
ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
@@ -568,6 +568,8 @@ static int jz4740_dma_probe(struct platform_device *pdev)
err_unregister:
dma_async_device_unregister(dd);
+err_clk:
+ clk_disable_unprepare(dmadev->clk);
return ret;
}
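
The jz4740 fix completes the probe error ladder so the clock enabled earlier in probe is released on every failure path, not just the later ones. The shape of such a ladder, sketched in plain C with stub helpers:

    #include <stdio.h>

    static int register_device(int fail) { return fail ? -1 : 0; }
    static int request_irq_stub(int fail) { return fail ? -1 : 0; }
    static void unregister_device(void) { puts("unregister device"); }
    static void disable_clock(void) { puts("disable clock"); }

    static int probe(int fail_reg, int fail_irq)
    {
        int ret;

        /* the clock is assumed to be enabled before this point */
        ret = register_device(fail_reg);
        if (ret)
            goto err_clk;

        ret = request_irq_stub(fail_irq);
        if (ret)
            goto err_unregister;

        return 0;

    err_unregister:
        unregister_device();
    err_clk:
        disable_clock();
        return ret;
    }

    int main(void)
    {
        probe(1, 0);    /* registration fails: only the clock is released */
        probe(0, 1);    /* IRQ fails: unregister first, then the clock */
        return 0;
    }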
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 47edc7f..ec5f9d2 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -155,6 +155,12 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
#define PATTERN_COUNT_MASK 0x1f
#define PATTERN_MEMSET_IDX 0x01
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+ bool done;
+ wait_queue_head_t *wait;
+};
+
struct dmatest_thread {
struct list_head node;
struct dmatest_info *info;
@@ -165,6 +171,8 @@ struct dmatest_thread {
u8 **dsts;
u8 **udsts;
enum dma_transaction_type type;
+ wait_queue_head_t done_wait;
+ struct dmatest_done test_done;
bool done;
};
@@ -342,18 +350,25 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
return error_count;
}
-/* poor man's completion - we want to use wait_event_freezable() on it */
-struct dmatest_done {
- bool done;
- wait_queue_head_t *wait;
-};
static void dmatest_callback(void *arg)
{
struct dmatest_done *done = arg;
-
- done->done = true;
- wake_up_all(done->wait);
+ struct dmatest_thread *thread =
+ container_of(done, struct dmatest_thread, test_done);
+ if (!thread->done) {
+ done->done = true;
+ wake_up_all(done->wait);
+ } else {
+ /*
+ * If thread->done, it means that this callback occurred
+ * after the parent thread has cleaned up. This can
+ * happen in the case that driver doesn't implement
+ * the terminate_all() functionality and a dma operation
+ * did not occur within the timeout period
+ */
+ WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
+ }
}
static unsigned int min_odd(unsigned int x, unsigned int y)
@@ -424,9 +439,8 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
*/
static int dmatest_func(void *data)
{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
struct dmatest_thread *thread = data;
- struct dmatest_done done = { .wait = &done_wait };
+ struct dmatest_done *done = &thread->test_done;
struct dmatest_info *info;
struct dmatest_params *params;
struct dma_chan *chan;
@@ -673,9 +687,9 @@ static int dmatest_func(void *data)
continue;
}
- done.done = false;
+ done->done = false;
tx->callback = dmatest_callback;
- tx->callback_param = &done;
+ tx->callback_param = done;
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
@@ -688,21 +702,12 @@ static int dmatest_func(void *data)
}
dma_async_issue_pending(chan);
- wait_event_freezable_timeout(done_wait, done.done,
+ wait_event_freezable_timeout(thread->done_wait, done->done,
msecs_to_jiffies(params->timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
- if (!done.done) {
- /*
- * We're leaving the timed out dma operation with
- * dangling pointer to done_wait. To make this
- * correct, we'll need to allocate wait_done for
- * each test iteration and perform "who's gonna
- * free it this time?" dancing. For now, just
- * leave it dangling.
- */
- WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
+ if (!done->done) {
dmaengine_unmap_put(um);
result("test timed out", total_tests, src_off, dst_off,
len, 0);
@@ -789,7 +794,7 @@ err_thread_type:
dmatest_KBs(runtime, total_len), ret);
/* terminate all transfers on specified channels */
- if (ret)
+ if (ret || failed_tests)
dmaengine_terminate_all(chan);
thread->done = true;
@@ -849,6 +854,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
thread->info = info;
thread->chan = dtc->chan;
thread->type = type;
+ thread->test_done.wait = &thread->done_wait;
+ init_waitqueue_head(&thread->done_wait);
smp_wmb();
thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
dma_chan_name(chan), op, i);
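
The dmatest rework moves the completion object from the worker's stack into the long-lived dmatest_thread, so a callback that fires after a timeout dereferences valid memory and can be detected via thread->done rather than scribbling on a dead stack frame. A compilable userspace sketch of the arrangement:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct done { bool done; };

    struct thread {
        struct done test_done;    /* was an on-stack variable before the fix */
        bool done;                /* set once the thread has cleaned up */
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void callback(void *arg)
    {
        struct done *d = arg;
        struct thread *t = container_of(d, struct thread, test_done);

        if (!t->done)
            d->done = true;    /* normal, in-time completion */
        else
            fprintf(stderr, "late callback after cleanup detected\n");
    }

    int main(void)
    {
        struct thread t = { { false }, false };

        callback(&t.test_done);    /* in-time callback */
        t.done = true;
        callback(&t.test_done);    /* timed-out, late callback */
        printf("completed = %d\n", t.test_done.done);
        return 0;
    }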
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 6775f2c..c756886 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -863,11 +863,11 @@ static void fsl_edma_irq_exit(
}
}
-static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma)
+static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
{
int i;
- for (i = 0; i < DMAMUX_NR; i++)
+ for (i = 0; i < nr_clocks; i++)
clk_disable_unprepare(fsl_edma->muxclk[i]);
}
@@ -904,25 +904,25 @@ static int fsl_edma_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(fsl_edma->muxbase[i]))
+ if (IS_ERR(fsl_edma->muxbase[i])) {
+ /* on error: disable all previously enabled clks */
+ fsl_disable_clocks(fsl_edma, i);
return PTR_ERR(fsl_edma->muxbase[i]);
+ }
sprintf(clkname, "dmamux%d", i);
fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
if (IS_ERR(fsl_edma->muxclk[i])) {
dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
+ /* on error: disable all previously enabled clks */
+ fsl_disable_clocks(fsl_edma, i);
return PTR_ERR(fsl_edma->muxclk[i]);
}
ret = clk_prepare_enable(fsl_edma->muxclk[i]);
- if (ret) {
- /* disable only clks which were enabled on error */
- for (; i >= 0; i--)
- clk_disable_unprepare(fsl_edma->muxclk[i]);
-
- dev_err(&pdev->dev, "DMAMUX clk block failed.\n");
- return ret;
- }
+ if (ret)
+ /* on error: disable all previously enabled clks */
+ fsl_disable_clocks(fsl_edma, i);
}
@@ -976,7 +976,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Can't register Freescale eDMA engine. (%d)\n", ret);
- fsl_disable_clocks(fsl_edma);
+ fsl_disable_clocks(fsl_edma, DMAMUX_NR);
return ret;
}
@@ -985,7 +985,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Can't register Freescale eDMA of_dma. (%d)\n", ret);
dma_async_device_unregister(&fsl_edma->dma_dev);
- fsl_disable_clocks(fsl_edma);
+ fsl_disable_clocks(fsl_edma, DMAMUX_NR);
return ret;
}
@@ -1015,7 +1015,7 @@ static int fsl_edma_remove(struct platform_device *pdev)
fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
of_dma_controller_free(np);
dma_async_device_unregister(&fsl_edma->dma_dev);
- fsl_disable_clocks(fsl_edma);
+ fsl_disable_clocks(fsl_edma, DMAMUX_NR);
return 0;
}
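
The fsl-edma pattern is worth noting: a single teardown helper parameterized by how many clocks were actually enabled serves both partial unwinding in probe (pass the loop index i) and full teardown (pass DMAMUX_NR). A standalone sketch:

    #include <stdio.h>

    #define NR_CLOCKS 4

    static void disable_clocks(int nr)
    {
        int i;

        for (i = 0; i < nr; i++)
            printf("disabling clk %d\n", i);
    }

    static int enable_clock(int i) { return i == 2 ? -1 : 0; /* clk 2 fails */ }

    int main(void)
    {
        int i;

        for (i = 0; i < NR_CLOCKS; i++) {
            if (enable_clock(i)) {
                disable_clocks(i);    /* only clks 0..i-1 were enabled */
                return 1;
            }
        }
        disable_clocks(NR_CLOCKS);    /* full teardown */
        return 0;
    }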
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 58d4ccd..8b5b23a 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -597,7 +597,6 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
for (i = 0; i < active && !seen_current; i++) {
struct dma_async_tx_descriptor *tx;
- smp_read_barrier_depends();
prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
desc = ioat_get_ring_ent(ioat_chan, idx + i);
dump_desc_dbg(ioat_chan, desc);
@@ -715,7 +714,6 @@ static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
for (i = 1; i < active; i++) {
struct dma_async_tx_descriptor *tx;
- smp_read_barrier_depends();
prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
desc = ioat_get_ring_ent(ioat_chan, idx + i);
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 2f31d3d..7792a91 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -390,7 +390,7 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
if (memcmp(src, dest, IOAT_TEST_SIZE)) {
dev_err(dev, "Self-test copy failed compare, disabling\n");
err = -ENODEV;
- goto free_resources;
+ goto unmap_dma;
}
unmap_dma:
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 2b2c7db..35c3936 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1615,22 +1615,6 @@ static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
* Power management
*/
-#ifdef CONFIG_PM_SLEEP
-static int rcar_dmac_sleep_suspend(struct device *dev)
-{
- /*
- * TODO: Wait for the current transfer to complete and stop the device.
- */
- return 0;
-}
-
-static int rcar_dmac_sleep_resume(struct device *dev)
-{
- /* TODO: Resume transfers, if any. */
- return 0;
-}
-#endif
-
#ifdef CONFIG_PM
static int rcar_dmac_runtime_suspend(struct device *dev)
{
@@ -1646,7 +1630,13 @@ static int rcar_dmac_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops rcar_dmac_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume)
+ /*
+ * TODO for system sleep/resume:
+ * - Wait for the current transfer to complete and stop the device,
+ * - Resume transfers, if any.
+ */
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
NULL)
};
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 96afb2a..3c40170 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -457,4 +457,11 @@ config EDAC_XGENE
Support for error detection and correction on the
APM X-Gene family of SOCs.
+config EDAC_TI
+ tristate "Texas Instruments DDR3 ECC Controller"
+ depends on ARCH_KEYSTONE || SOC_DRA7XX
+ help
+ Support for error detection and correction on the
+ Texas Instruments Keystone and DRA7xx SoCs.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 0fd9ffa..b54912e 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -78,3 +78,4 @@ obj-$(CONFIG_EDAC_THUNDERX) += thunderx_edac.o
obj-$(CONFIG_EDAC_ALTERA) += altera_edac.o
obj-$(CONFIG_EDAC_SYNOPSYS) += synopsys_edac.o
obj-$(CONFIG_EDAC_XGENE) += xgene_edac.o
+obj-$(CONFIG_EDAC_TI) += ti_edac.o
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index ec5d695..3c68bb5 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -758,7 +758,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
/* Non-ECC RAM? */
printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
res = -ENODEV;
- goto err2;
+ goto err;
}
edac_dbg(3, "init mci\n");
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
index 9c1ffe3..aeb222c 100644
--- a/drivers/edac/octeon_edac-lmc.c
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -78,6 +78,7 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
if (!pvt->inject)
int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
else {
+ int_reg.u64 = 0;
if (pvt->error_type == 1)
int_reg.s.sec_err = 1;
if (pvt->error_type == 2)
diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
new file mode 100644
index 0000000..6ac26d1
--- /dev/null
+++ b/drivers/edac/ti_edac.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Texas Instruments DDR3 ECC error correction and detection driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/edac.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+
+#include "edac_module.h"
+
+/* EMIF controller registers */
+#define EMIF_SDRAM_CONFIG 0x008
+#define EMIF_IRQ_STATUS 0x0ac
+#define EMIF_IRQ_ENABLE_SET 0x0b4
+#define EMIF_ECC_CTRL 0x110
+#define EMIF_1B_ECC_ERR_CNT 0x130
+#define EMIF_1B_ECC_ERR_THRSH 0x134
+#define EMIF_1B_ECC_ERR_ADDR_LOG 0x13c
+#define EMIF_2B_ECC_ERR_ADDR_LOG 0x140
+
+/* Bit definitions for EMIF_SDRAM_CONFIG */
+#define SDRAM_TYPE_SHIFT 29
+#define SDRAM_TYPE_MASK GENMASK(31, 29)
+#define SDRAM_TYPE_DDR3 (3 << SDRAM_TYPE_SHIFT)
+#define SDRAM_TYPE_DDR2 (2 << SDRAM_TYPE_SHIFT)
+#define SDRAM_NARROW_MODE_MASK GENMASK(15, 14)
+#define SDRAM_K2_NARROW_MODE_SHIFT 12
+#define SDRAM_K2_NARROW_MODE_MASK GENMASK(13, 12)
+#define SDRAM_ROWSIZE_SHIFT 7
+#define SDRAM_ROWSIZE_MASK GENMASK(9, 7)
+#define SDRAM_IBANK_SHIFT 4
+#define SDRAM_IBANK_MASK GENMASK(6, 4)
+#define SDRAM_K2_IBANK_SHIFT 5
+#define SDRAM_K2_IBANK_MASK GENMASK(6, 5)
+#define SDRAM_K2_EBANK_SHIFT 3
+#define SDRAM_K2_EBANK_MASK BIT(SDRAM_K2_EBANK_SHIFT)
+#define SDRAM_PAGESIZE_SHIFT 0
+#define SDRAM_PAGESIZE_MASK GENMASK(2, 0)
+#define SDRAM_K2_PAGESIZE_SHIFT 0
+#define SDRAM_K2_PAGESIZE_MASK GENMASK(1, 0)
+
+#define EMIF_1B_ECC_ERR_THRSH_SHIFT 24
+
+/* IRQ bit definitions */
+#define EMIF_1B_ECC_ERR BIT(5)
+#define EMIF_2B_ECC_ERR BIT(4)
+#define EMIF_WR_ECC_ERR BIT(3)
+#define EMIF_SYS_ERR BIT(0)
+/* Bit 31 enables ECC and 28 enables RMW */
+#define ECC_ENABLED (BIT(31) | BIT(28))
+
+#define EDAC_MOD_NAME "ti-emif-edac"
+
+enum {
+ EMIF_TYPE_DRA7,
+ EMIF_TYPE_K2
+};
+
+struct ti_edac {
+ void __iomem *reg;
+};
+
+static u32 ti_edac_readl(struct ti_edac *edac, u16 offset)
+{
+ return readl_relaxed(edac->reg + offset);
+}
+
+static void ti_edac_writel(struct ti_edac *edac, u32 val, u16 offset)
+{
+ writel_relaxed(val, edac->reg + offset);
+}
+
+static irqreturn_t ti_edac_isr(int irq, void *data)
+{
+ struct mem_ctl_info *mci = data;
+ struct ti_edac *edac = mci->pvt_info;
+ u32 irq_status;
+ u32 err_addr;
+ int err_count;
+
+ irq_status = ti_edac_readl(edac, EMIF_IRQ_STATUS);
+
+ if (irq_status & EMIF_1B_ECC_ERR) {
+ err_addr = ti_edac_readl(edac, EMIF_1B_ECC_ERR_ADDR_LOG);
+ err_count = ti_edac_readl(edac, EMIF_1B_ECC_ERR_CNT);
+ ti_edac_writel(edac, err_count, EMIF_1B_ECC_ERR_CNT);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count,
+ err_addr >> PAGE_SHIFT,
+ err_addr & ~PAGE_MASK, -1, 0, 0, 0,
+ mci->ctl_name, "1B");
+ }
+
+ if (irq_status & EMIF_2B_ECC_ERR) {
+ err_addr = ti_edac_readl(edac, EMIF_2B_ECC_ERR_ADDR_LOG);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+ err_addr >> PAGE_SHIFT,
+ err_addr & ~PAGE_MASK, -1, 0, 0, 0,
+ mci->ctl_name, "2B");
+ }
+
+ if (irq_status & EMIF_WR_ECC_ERR)
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+ 0, 0, -1, 0, 0, 0,
+ mci->ctl_name, "WR");
+
+ ti_edac_writel(edac, irq_status, EMIF_IRQ_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static void ti_edac_setup_dimm(struct mem_ctl_info *mci, u32 type)
+{
+ struct dimm_info *dimm;
+ struct ti_edac *edac = mci->pvt_info;
+ int bits;
+ u32 val;
+ u32 memsize;
+
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, 0, 0, 0);
+
+ val = ti_edac_readl(edac, EMIF_SDRAM_CONFIG);
+
+ if (type == EMIF_TYPE_DRA7) {
+ bits = ((val & SDRAM_PAGESIZE_MASK) >> SDRAM_PAGESIZE_SHIFT) + 8;
+ bits += ((val & SDRAM_ROWSIZE_MASK) >> SDRAM_ROWSIZE_SHIFT) + 9;
+ bits += (val & SDRAM_IBANK_MASK) >> SDRAM_IBANK_SHIFT;
+
+ if (val & SDRAM_NARROW_MODE_MASK) {
+ bits++;
+ dimm->dtype = DEV_X16;
+ } else {
+ bits += 2;
+ dimm->dtype = DEV_X32;
+ }
+ } else {
+ bits = 16;
+ bits += ((val & SDRAM_K2_PAGESIZE_MASK) >>
+ SDRAM_K2_PAGESIZE_SHIFT) + 8;
+ bits += (val & SDRAM_K2_IBANK_MASK) >> SDRAM_K2_IBANK_SHIFT;
+ bits += (val & SDRAM_K2_EBANK_MASK) >> SDRAM_K2_EBANK_SHIFT;
+
+ val = (val & SDRAM_K2_NARROW_MODE_MASK) >>
+ SDRAM_K2_NARROW_MODE_SHIFT;
+ switch (val) {
+ case 0:
+ bits += 3;
+ dimm->dtype = DEV_X64;
+ break;
+ case 1:
+ bits += 2;
+ dimm->dtype = DEV_X32;
+ break;
+ case 2:
+ bits++;
+ dimm->dtype = DEV_X16;
+ break;
+ }
+ }
+
+ memsize = 1 << bits;
+
+ dimm->nr_pages = memsize >> PAGE_SHIFT;
+ dimm->grain = 4;
+ if ((val & SDRAM_TYPE_MASK) == SDRAM_TYPE_DDR2)
+ dimm->mtype = MEM_DDR2;
+ else
+ dimm->mtype = MEM_DDR3;
+
+ val = ti_edac_readl(edac, EMIF_ECC_CTRL);
+ if (val & ECC_ENABLED)
+ dimm->edac_mode = EDAC_SECDED;
+ else
+ dimm->edac_mode = EDAC_NONE;
+}
+
+static const struct of_device_id ti_edac_of_match[] = {
+ { .compatible = "ti,emif-keystone", .data = (void *)EMIF_TYPE_K2 },
+ { .compatible = "ti,emif-dra7xx", .data = (void *)EMIF_TYPE_DRA7 },
+ {},
+};
+
+static int _emif_get_id(struct device_node *node)
+{
+ struct device_node *np;
+ const __be32 *addrp;
+ u32 addr, my_addr;
+ int my_id = 0;
+
+ addrp = of_get_address(node, 0, NULL, NULL);
+ my_addr = (u32)of_translate_address(node, addrp);
+
+ for_each_matching_node(np, ti_edac_of_match) {
+ if (np == node)
+ continue;
+
+ addrp = of_get_address(np, 0, NULL, NULL);
+ addr = (u32)of_translate_address(np, addrp);
+
+ edac_printk(KERN_INFO, EDAC_MOD_NAME,
+ "addr=%x, my_addr=%x\n",
+ addr, my_addr);
+
+ if (addr < my_addr)
+ my_id++;
+ }
+
+ return my_id;
+}
+
+static int ti_edac_probe(struct platform_device *pdev)
+{
+ int error_irq = 0, ret = -ENODEV;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *reg;
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[1];
+ const struct of_device_id *id;
+ struct ti_edac *edac;
+ int emif_id;
+
+ id = of_match_device(ti_edac_of_match, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(reg)) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "EMIF controller regs not defined\n");
+ return PTR_ERR(reg);
+ }
+
+ layers[0].type = EDAC_MC_LAYER_ALL_MEM;
+ layers[0].size = 1;
+
+ /* Allocate ID number for our EMIF controller */
+ emif_id = _emif_get_id(pdev->dev.of_node);
+ if (emif_id < 0)
+ return -EINVAL;
+
+ mci = edac_mc_alloc(emif_id, 1, layers, sizeof(*edac));
+ if (!mci)
+ return -ENOMEM;
+
+ mci->pdev = &pdev->dev;
+ edac = mci->pvt_info;
+ edac->reg = reg;
+ platform_set_drvdata(pdev, mci);
+
+ mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED | EDAC_FLAG_NONE;
+ mci->mod_name = EDAC_MOD_NAME;
+ mci->ctl_name = id->compatible;
+ mci->dev_name = dev_name(&pdev->dev);
+
+ /* Setup memory layout */
+ ti_edac_setup_dimm(mci, (u32)(id->data));
+
+ /* add EMIF ECC error handler */
+ error_irq = platform_get_irq(pdev, 0);
+ if (error_irq < 0) {
+ ret = error_irq;
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "EMIF irq number not defined.\n");
+ goto err;
+ }
+
+ ret = devm_request_irq(dev, error_irq, ti_edac_isr, 0,
+ "emif-edac-irq", mci);
+ if (ret) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "request_irq fail for EMIF EDAC irq\n");
+ goto err;
+ }
+
+ ret = edac_mc_add_mc(mci);
+ if (ret) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "Failed to register mci: %d.\n", ret);
+ goto err;
+ }
+
+ /* Generate an interrupt with each 1b error */
+ ti_edac_writel(edac, 1 << EMIF_1B_ECC_ERR_THRSH_SHIFT,
+ EMIF_1B_ECC_ERR_THRSH);
+
+ /* Enable interrupts */
+ ti_edac_writel(edac,
+ EMIF_1B_ECC_ERR | EMIF_2B_ECC_ERR | EMIF_WR_ECC_ERR,
+ EMIF_IRQ_ENABLE_SET);
+
+ return 0;
+
+err:
+ edac_mc_free(mci);
+ return ret;
+}
+
+static int ti_edac_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+
+ return 0;
+}
+
+static struct platform_driver ti_edac_driver = {
+ .probe = ti_edac_probe,
+ .remove = ti_edac_remove,
+ .driver = {
+ .name = EDAC_MOD_NAME,
+ .of_match_table = ti_edac_of_match,
+ },
+};
+
+module_platform_driver(ti_edac_driver);
+
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("EDAC Driver for Texas Instruments DDR3 MC");
+MODULE_LICENSE("GPL v2");
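
As a worked example of the geometry math in ti_edac_setup_dimm(): on the DRA7 branch, column, row and internal-bank address bits plus the byte-address bits implied by the bus width are summed to get log2 of the SDRAM size. The register field values below are made up for illustration:

    #include <stdio.h>

    int main(void)
    {
        int pagesize = 2;    /* field value 2 -> 2 + 8 = 10 column bits */
        int rowsize = 5;     /* field value 5 -> 5 + 9 = 14 row bits */
        int ibank = 3;       /* 8 internal banks -> 3 bits */
        int narrow = 0;      /* 0 -> 32-bit bus -> 2 byte-address bits */
        int bits;

        bits = (pagesize + 8) + (rowsize + 9) + ibank + (narrow ? 1 : 2);
        printf("SDRAM size = 2^%d bytes = %lu MiB\n",
               bits, (1UL << bits) >> 20);    /* 2^29 = 512 MiB */
        return 0;
    }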
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 981fba5..1621f2f 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -24,8 +24,6 @@
#include <linux/notifier.h>
#include <linux/extcon-provider.h>
#include <linux/regmap.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/mfd/axp20x.h>
/* Power source status register */
@@ -79,11 +77,6 @@ enum axp288_extcon_reg {
AXP288_BC_DET_STAT_REG = 0x2f,
};
-enum axp288_mux_select {
- EXTCON_GPIO_MUX_SEL_PMIC = 0,
- EXTCON_GPIO_MUX_SEL_SOC,
-};
-
enum axp288_extcon_irq {
VBUS_FALLING_IRQ = 0,
VBUS_RISING_IRQ,
@@ -104,10 +97,8 @@ struct axp288_extcon_info {
struct device *dev;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irqc;
- struct gpio_desc *gpio_mux_cntl;
int irq[EXTCON_IRQ_END];
struct extcon_dev *edev;
- struct notifier_block extcon_nb;
unsigned int previous_cable;
};
@@ -197,15 +188,6 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
}
no_vbus:
- /*
- * If VBUS is absent Connect D+/D- lines to PMIC for BC
- * detection. Else connect them to SOC for USB communication.
- */
- if (info->gpio_mux_cntl)
- gpiod_set_value(info->gpio_mux_cntl,
- vbus_attach ? EXTCON_GPIO_MUX_SEL_SOC
- : EXTCON_GPIO_MUX_SEL_PMIC);
-
extcon_set_state_sync(info->edev, info->previous_cable, false);
if (info->previous_cable == EXTCON_CHG_USB_SDP)
extcon_set_state_sync(info->edev, EXTCON_USB, false);
@@ -253,8 +235,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
{
struct axp288_extcon_info *info;
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
- struct axp288_extcon_pdata *pdata = pdev->dev.platform_data;
- int ret, i, pirq, gpio;
+ int ret, i, pirq;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -264,8 +245,6 @@ static int axp288_extcon_probe(struct platform_device *pdev)
info->regmap = axp20x->regmap;
info->regmap_irqc = axp20x->regmap_irqc;
info->previous_cable = EXTCON_NONE;
- if (pdata)
- info->gpio_mux_cntl = pdata->gpio_mux_cntl;
platform_set_drvdata(pdev, info);
@@ -286,21 +265,11 @@ static int axp288_extcon_probe(struct platform_device *pdev)
return ret;
}
- /* Set up gpio control for USB Mux */
- if (info->gpio_mux_cntl) {
- gpio = desc_to_gpio(info->gpio_mux_cntl);
- ret = devm_gpio_request(&pdev->dev, gpio, "USB_MUX");
- if (ret < 0) {
- dev_err(&pdev->dev,
- "failed to request the gpio=%d\n", gpio);
- return ret;
- }
- gpiod_direction_output(info->gpio_mux_cntl,
- EXTCON_GPIO_MUX_SEL_PMIC);
- }
-
for (i = 0; i < EXTCON_IRQ_END; i++) {
pirq = platform_get_irq(pdev, i);
+ if (pirq < 0)
+ return pirq;
+
info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
if (info->irq[i] < 0) {
dev_err(&pdev->dev,
diff --git a/drivers/extcon/extcon-usbc-cros-ec.c b/drivers/extcon/extcon-usbc-cros-ec.c
index 6187f73..6721ab0 100644
--- a/drivers/extcon/extcon-usbc-cros-ec.c
+++ b/drivers/extcon/extcon-usbc-cros-ec.c
@@ -34,16 +34,26 @@ struct cros_ec_extcon_info {
struct notifier_block notifier;
+ unsigned int dr; /* data role */
+ bool pr; /* power role (true if VBUS enabled) */
bool dp; /* DisplayPort enabled */
bool mux; /* SuperSpeed (usb3) enabled */
unsigned int power_type;
};
static const unsigned int usb_type_c_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
EXTCON_DISP_DP,
EXTCON_NONE,
};
+enum usb_data_roles {
+ DR_NONE,
+ DR_HOST,
+ DR_DEVICE,
+};
+
/**
* cros_ec_pd_command() - Send a command to the EC.
* @info: pointer to struct cros_ec_extcon_info
@@ -150,6 +160,7 @@ static int cros_ec_usb_get_role(struct cros_ec_extcon_info *info,
pd_control.port = info->port_id;
pd_control.role = USB_PD_CTRL_ROLE_NO_CHANGE;
pd_control.mux = USB_PD_CTRL_MUX_NO_CHANGE;
+ pd_control.swap = USB_PD_CTRL_SWAP_NONE;
ret = cros_ec_pd_command(info, EC_CMD_USB_PD_CONTROL, 1,
&pd_control, sizeof(pd_control),
&resp, sizeof(resp));
@@ -183,11 +194,72 @@ static int cros_ec_pd_get_num_ports(struct cros_ec_extcon_info *info)
return resp.num_ports;
}
+static const char *cros_ec_usb_role_string(unsigned int role)
+{
+ return role == DR_NONE ? "DISCONNECTED" :
+ (role == DR_HOST ? "DFP" : "UFP");
+}
+
+static const char *cros_ec_usb_power_type_string(unsigned int type)
+{
+ switch (type) {
+ case USB_CHG_TYPE_NONE:
+ return "USB_CHG_TYPE_NONE";
+ case USB_CHG_TYPE_PD:
+ return "USB_CHG_TYPE_PD";
+ case USB_CHG_TYPE_PROPRIETARY:
+ return "USB_CHG_TYPE_PROPRIETARY";
+ case USB_CHG_TYPE_C:
+ return "USB_CHG_TYPE_C";
+ case USB_CHG_TYPE_BC12_DCP:
+ return "USB_CHG_TYPE_BC12_DCP";
+ case USB_CHG_TYPE_BC12_CDP:
+ return "USB_CHG_TYPE_BC12_CDP";
+ case USB_CHG_TYPE_BC12_SDP:
+ return "USB_CHG_TYPE_BC12_SDP";
+ case USB_CHG_TYPE_OTHER:
+ return "USB_CHG_TYPE_OTHER";
+ case USB_CHG_TYPE_VBUS:
+ return "USB_CHG_TYPE_VBUS";
+ case USB_CHG_TYPE_UNKNOWN:
+ return "USB_CHG_TYPE_UNKNOWN";
+ default:
+ return "USB_CHG_TYPE_UNKNOWN";
+ }
+}
+
+static bool cros_ec_usb_power_type_is_wall_wart(unsigned int type,
+ unsigned int role)
+{
+ switch (type) {
+ /* FIXME : Guppy, Donnettes, and other chargers will be miscategorized
+ * because they identify with USB_CHG_TYPE_C, but we can't return true
+ * here from that code because that breaks Suzy-Q and other kinds of
+ * USB Type-C cables and peripherals.
+ */
+ case USB_CHG_TYPE_PROPRIETARY:
+ case USB_CHG_TYPE_BC12_DCP:
+ return true;
+ case USB_CHG_TYPE_PD:
+ case USB_CHG_TYPE_C:
+ case USB_CHG_TYPE_BC12_CDP:
+ case USB_CHG_TYPE_BC12_SDP:
+ case USB_CHG_TYPE_OTHER:
+ case USB_CHG_TYPE_VBUS:
+ case USB_CHG_TYPE_UNKNOWN:
+ case USB_CHG_TYPE_NONE:
+ default:
+ return false;
+ }
+}
+
static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
bool force)
{
struct device *dev = info->dev;
int role, power_type;
+ unsigned int dr = DR_NONE;
+ bool pr = false;
bool polarity = false;
bool dp = false;
bool mux = false;
@@ -206,9 +278,12 @@ static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
dev_err(dev, "failed getting role err = %d\n", role);
return role;
}
+ dev_dbg(dev, "disconnected\n");
} else {
int pd_mux_state;
+ dr = (role & PD_CTRL_RESP_ROLE_DATA) ? DR_HOST : DR_DEVICE;
+ pr = (role & PD_CTRL_RESP_ROLE_POWER);
pd_mux_state = cros_ec_usb_get_pd_mux_state(info);
if (pd_mux_state < 0)
pd_mux_state = USB_PD_MUX_USB_ENABLED;
@@ -216,20 +291,62 @@ static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
dp = pd_mux_state & USB_PD_MUX_DP_ENABLED;
mux = pd_mux_state & USB_PD_MUX_USB_ENABLED;
hpd = pd_mux_state & USB_PD_MUX_HPD_IRQ;
- }
- if (force || info->dp != dp || info->mux != mux ||
- info->power_type != power_type) {
+ dev_dbg(dev,
+ "connected role 0x%x pwr type %d dr %d pr %d pol %d mux %d dp %d hpd %d\n",
+ role, power_type, dr, pr, polarity, mux, dp, hpd);
+ }
+ /*
+ * When there is no USB host (e.g. USB PD charger),
+ * we are not really a UFP for the AP.
+ */
+ if (dr == DR_DEVICE &&
+ cros_ec_usb_power_type_is_wall_wart(power_type, role))
+ dr = DR_NONE;
+
+ if (force || info->dr != dr || info->pr != pr || info->dp != dp ||
+ info->mux != mux || info->power_type != power_type) {
+ bool host_connected = false, device_connected = false;
+
+ dev_dbg(dev, "Type/Role switch! type = %s role = %s\n",
+ cros_ec_usb_power_type_string(power_type),
+ cros_ec_usb_role_string(dr));
+ info->dr = dr;
+ info->pr = pr;
info->dp = dp;
info->mux = mux;
info->power_type = power_type;
- extcon_set_state(info->edev, EXTCON_DISP_DP, dp);
+ if (dr == DR_DEVICE)
+ device_connected = true;
+ else if (dr == DR_HOST)
+ host_connected = true;
+ extcon_set_state(info->edev, EXTCON_USB, device_connected);
+ extcon_set_state(info->edev, EXTCON_USB_HOST, host_connected);
+ extcon_set_state(info->edev, EXTCON_DISP_DP, dp);
+ extcon_set_property(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_VBUS,
+ (union extcon_property_value)(int)pr);
+ extcon_set_property(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_VBUS,
+ (union extcon_property_value)(int)pr);
+ extcon_set_property(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_TYPEC_POLARITY,
+ (union extcon_property_value)(int)polarity);
+ extcon_set_property(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_TYPEC_POLARITY,
+ (union extcon_property_value)(int)polarity);
extcon_set_property(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_TYPEC_POLARITY,
(union extcon_property_value)(int)polarity);
+ extcon_set_property(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_SS,
+ (union extcon_property_value)(int)mux);
+ extcon_set_property(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_SS,
+ (union extcon_property_value)(int)mux);
extcon_set_property(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_SS,
(union extcon_property_value)(int)mux);
@@ -237,6 +354,8 @@ static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
EXTCON_PROP_DISP_HPD,
(union extcon_property_value)(int)hpd);
+ extcon_sync(info->edev, EXTCON_USB);
+ extcon_sync(info->edev, EXTCON_USB_HOST);
extcon_sync(info->edev, EXTCON_DISP_DP);
} else if (hpd) {
@@ -322,13 +441,28 @@ static int extcon_cros_ec_probe(struct platform_device *pdev)
return ret;
}
+ extcon_set_property_capability(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_VBUS);
+ extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_VBUS);
+ extcon_set_property_capability(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
+ extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_TYPEC_POLARITY);
+ extcon_set_property_capability(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_SS);
+ extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_SS);
extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_SS);
extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_DISP_HPD);
+ info->dr = DR_NONE;
+ info->pr = false;
+
platform_set_drvdata(pdev, info);
/* Get PD events from the EC */
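
The key policy added to the cros-ec extcon driver is the wall-wart demotion: when the EC reports the device data role but the power partner is a charger-only source, the role is forced back to disconnected so the AP does not bring up a USB gadget stack for a plain power supply. A reduced sketch of that decision (enum values invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    enum data_role { DR_NONE, DR_HOST, DR_DEVICE };
    enum chg_type { CHG_PD, CHG_PROPRIETARY, CHG_BC12_DCP, CHG_BC12_SDP };

    static bool is_wall_wart(enum chg_type t)
    {
        return t == CHG_PROPRIETARY || t == CHG_BC12_DCP;
    }

    static enum data_role effective_role(enum data_role dr, enum chg_type t)
    {
        if (dr == DR_DEVICE && is_wall_wart(t))
            return DR_NONE;    /* charger only: no USB host behind it */
        return dr;
    }

    int main(void)
    {
        printf("%d\n", effective_role(DR_DEVICE, CHG_BC12_DCP));    /* 0 = DR_NONE */
        printf("%d\n", effective_role(DR_DEVICE, CHG_BC12_SDP));    /* 2 = DR_DEVICE */
        return 0;
    }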
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 2b4c39f..6047ed4 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -159,13 +159,21 @@ config RESET_ATTACK_MITIGATION
using the TCG Platform Reset Attack Mitigation specification. This
protects against an attacker forcibly rebooting the system while it
still contains secrets in RAM, booting another OS and extracting the
- secrets.
+ secrets. This should only be enabled when userland is configured to
+ clear the MemoryOverwriteRequest flag on clean shutdown after secrets
+ have been evicted, since otherwise it will trigger even on clean
+ reboots.
endmenu
config UEFI_CPER
bool
+config UEFI_CPER_ARM
+ bool
+ depends on UEFI_CPER && ( ARM || ARM64 )
+ default y
+
config EFI_DEV_PATH_PARSER
bool
depends on ACPI
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 269501d..a3e73d6 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -30,3 +30,4 @@ arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
obj-$(CONFIG_ARM) += $(arm-obj-y)
obj-$(CONFIG_ARM64) += $(arm-obj-y)
obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o
+obj-$(CONFIG_UEFI_CPER_ARM) += cper-arm.o
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index ec8ac5c..e456f46 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -20,10 +20,6 @@
#define NO_FURTHER_WRITE_ACTION -1
-#ifndef phys_to_page
-#define phys_to_page(x) pfn_to_page((x) >> PAGE_SHIFT)
-#endif
-
/**
* efi_free_all_buff_pages - free all previous allocated buffer pages
* @cap_info: pointer to current instance of capsule_info structure
@@ -35,7 +31,7 @@
static void efi_free_all_buff_pages(struct capsule_info *cap_info)
{
while (cap_info->index > 0)
- __free_page(phys_to_page(cap_info->pages[--cap_info->index]));
+ __free_page(cap_info->pages[--cap_info->index]);
cap_info->index = NO_FURTHER_WRITE_ACTION;
}
@@ -49,7 +45,7 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info)
pages_needed = ALIGN(cap_info->total_size, PAGE_SIZE) / PAGE_SIZE;
if (pages_needed == 0) {
- pr_err("invalid capsule size");
+ pr_err("invalid capsule size\n");
return -EINVAL;
}
@@ -71,6 +67,14 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info)
cap_info->pages = temp_page;
+ temp_page = krealloc(cap_info->phys,
+ pages_needed * sizeof(phys_addr_t),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!temp_page)
+ return -ENOMEM;
+
+ cap_info->phys = temp_page;
+
return 0;
}
@@ -105,9 +109,24 @@ int __weak efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
**/
static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
{
+ bool do_vunmap = false;
int ret;
- ret = efi_capsule_update(&cap_info->header, cap_info->pages);
+ /*
+ * cap_info->capsule may have been assigned already by a quirk
+ * handler, so only overwrite it if it is NULL
+ */
+ if (!cap_info->capsule) {
+ cap_info->capsule = vmap(cap_info->pages, cap_info->index,
+ VM_MAP, PAGE_KERNEL);
+ if (!cap_info->capsule)
+ return -ENOMEM;
+ do_vunmap = true;
+ }
+
+ ret = efi_capsule_update(cap_info->capsule, cap_info->phys);
+ if (do_vunmap)
+ vunmap(cap_info->capsule);
if (ret) {
pr_err("capsule update failed\n");
return ret;
@@ -165,10 +184,12 @@ static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
goto failed;
}
- cap_info->pages[cap_info->index++] = page_to_phys(page);
+ cap_info->pages[cap_info->index] = page;
+ cap_info->phys[cap_info->index] = page_to_phys(page);
cap_info->page_bytes_remain = PAGE_SIZE;
+ cap_info->index++;
} else {
- page = phys_to_page(cap_info->pages[cap_info->index - 1]);
+ page = cap_info->pages[cap_info->index - 1];
}
kbuff = kmap(page);
@@ -252,6 +273,7 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
struct capsule_info *cap_info = file->private_data;
kfree(cap_info->pages);
+ kfree(cap_info->phys);
kfree(file->private_data);
file->private_data = NULL;
return 0;
@@ -281,6 +303,13 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
+ cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL);
+ if (!cap_info->phys) {
+ kfree(cap_info->pages);
+ kfree(cap_info);
+ return -ENOMEM;
+ }
+
file->private_data = cap_info;
return 0;
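
The capsule loader now tracks each page twice: as a struct page * so the scattered pages can be vmap()'d into the virtually contiguous view that efi_capsule_update() reads, and as a phys_addr_t list handed to the firmware. A userspace sketch of growing the two arrays in lockstep, with realloc() standing in for krealloc():

    #include <stdint.h>
    #include <stdlib.h>

    struct capsule_info {
        void **pages;       /* struct page * in the kernel */
        uint64_t *phys;     /* phys_addr_t in the kernel */
        int index;
    };

    static int grow(struct capsule_info *ci, int pages_needed)
    {
        void *tmp;

        tmp = realloc(ci->pages, pages_needed * sizeof(*ci->pages));
        if (!tmp)
            return -1;
        ci->pages = tmp;

        tmp = realloc(ci->phys, pages_needed * sizeof(*ci->phys));
        if (!tmp)
            return -1;    /* ci->pages stays valid for the caller to free */
        ci->phys = tmp;

        return 0;
    }

    int main(void)
    {
        struct capsule_info ci = { 0 };

        return grow(&ci, 8);
    }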
diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c
new file mode 100644
index 0000000..698e5c8
--- /dev/null
+++ b/drivers/firmware/efi/cper-arm.c
@@ -0,0 +1,356 @@
+/*
+ * UEFI Common Platform Error Record (CPER) support
+ *
+ * Copyright (C) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/cper.h>
+#include <linux/dmi.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/printk.h>
+#include <linux/bcd.h>
+#include <acpi/ghes.h>
+#include <ras/ras_event.h>
+
+#define INDENT_SP " "
+
+static const char * const arm_reg_ctx_strs[] = {
+ "AArch32 general purpose registers",
+ "AArch32 EL1 context registers",
+ "AArch32 EL2 context registers",
+ "AArch32 secure context registers",
+ "AArch64 general purpose registers",
+ "AArch64 EL1 context registers",
+ "AArch64 EL2 context registers",
+ "AArch64 EL3 context registers",
+ "Misc. system register structure",
+};
+
+static const char * const arm_err_trans_type_strs[] = {
+ "Instruction",
+ "Data Access",
+ "Generic",
+};
+
+static const char * const arm_bus_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+ "Generic write (type of instruction of data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+};
+
+static const char * const arm_cache_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+ "Generic write (type of instruction of data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+ "Eviction",
+ "Snooping (processor initiated a cache snoop that resulted in an error)",
+ "Snooped (processor raised a cache error caused by another processor or device snooping its cache)",
+ "Management",
+};
+
+static const char * const arm_tlb_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+ "Generic write (type of instruction of data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+ "Local management operation (processor initiated a TLB management operation that resulted in an error)",
+ "External management operation (processor raised a TLB error caused by another processor or device broadcasting TLB operations)",
+};
+
+static const char * const arm_bus_err_part_type_strs[] = {
+ "Local processor originated request",
+ "Local processor responded to request",
+ "Local processor observed",
+ "Generic",
+};
+
+static const char * const arm_bus_err_addr_space_strs[] = {
+ "External Memory Access",
+ "Internal Memory Access",
+ "Unknown",
+ "Device Memory Access",
+};
+
+static void cper_print_arm_err_info(const char *pfx, u32 type,
+ u64 error_info)
+{
+ u8 trans_type, op_type, level, participation_type, address_space;
+ u16 mem_attributes;
+ bool proc_context_corrupt, corrected, precise_pc, restartable_pc;
+ bool time_out, access_mode;
+
+ /* If the type is unknown, bail. */
+ if (type > CPER_ARM_MAX_TYPE)
+ return;
+
+ /*
+ * Vendor type errors have error information values that are vendor
+ * specific.
+ */
+ if (type == CPER_ARM_VENDOR_ERROR)
+ return;
+
+ if (error_info & CPER_ARM_ERR_VALID_TRANSACTION_TYPE) {
+ trans_type = ((error_info >> CPER_ARM_ERR_TRANSACTION_SHIFT)
+ & CPER_ARM_ERR_TRANSACTION_MASK);
+ if (trans_type < ARRAY_SIZE(arm_err_trans_type_strs)) {
+ printk("%stransaction type: %s\n", pfx,
+ arm_err_trans_type_strs[trans_type]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_OPERATION_TYPE) {
+ op_type = ((error_info >> CPER_ARM_ERR_OPERATION_SHIFT)
+ & CPER_ARM_ERR_OPERATION_MASK);
+ switch (type) {
+ case CPER_ARM_CACHE_ERROR:
+ if (op_type < ARRAY_SIZE(arm_cache_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_cache_err_op_strs[op_type]);
+ }
+ break;
+ case CPER_ARM_TLB_ERROR:
+ if (op_type < ARRAY_SIZE(arm_tlb_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_tlb_err_op_strs[op_type]);
+ }
+ break;
+ case CPER_ARM_BUS_ERROR:
+ if (op_type < ARRAY_SIZE(arm_bus_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_bus_err_op_strs[op_type]);
+ }
+ break;
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_LEVEL) {
+ level = ((error_info >> CPER_ARM_ERR_LEVEL_SHIFT)
+ & CPER_ARM_ERR_LEVEL_MASK);
+ switch (type) {
+ case CPER_ARM_CACHE_ERROR:
+ printk("%scache level: %d\n", pfx, level);
+ break;
+ case CPER_ARM_TLB_ERROR:
+ printk("%sTLB level: %d\n", pfx, level);
+ break;
+ case CPER_ARM_BUS_ERROR:
+ printk("%saffinity level at which the bus error occurred: %d\n",
+ pfx, level);
+ break;
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT) {
+ proc_context_corrupt = ((error_info >> CPER_ARM_ERR_PC_CORRUPT_SHIFT)
+ & CPER_ARM_ERR_PC_CORRUPT_MASK);
+ if (proc_context_corrupt)
+ printk("%sprocessor context corrupted\n", pfx);
+ else
+ printk("%sprocessor context not corrupted\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_CORRECTED) {
+ corrected = ((error_info >> CPER_ARM_ERR_CORRECTED_SHIFT)
+ & CPER_ARM_ERR_CORRECTED_MASK);
+ if (corrected)
+ printk("%sthe error has been corrected\n", pfx);
+ else
+ printk("%sthe error has not been corrected\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_PRECISE_PC) {
+ precise_pc = ((error_info >> CPER_ARM_ERR_PRECISE_PC_SHIFT)
+ & CPER_ARM_ERR_PRECISE_PC_MASK);
+ if (precise_pc)
+ printk("%sPC is precise\n", pfx);
+ else
+ printk("%sPC is imprecise\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_RESTARTABLE_PC) {
+ restartable_pc = ((error_info >> CPER_ARM_ERR_RESTARTABLE_PC_SHIFT)
+ & CPER_ARM_ERR_RESTARTABLE_PC_MASK);
+ if (restartable_pc)
+ printk("%sProgram execution can be restarted reliably at the PC associated with the error.\n", pfx);
+ }
+
+ /* The rest of the fields are specific to bus errors */
+ if (type != CPER_ARM_BUS_ERROR)
+ return;
+
+ if (error_info & CPER_ARM_ERR_VALID_PARTICIPATION_TYPE) {
+ participation_type = ((error_info >> CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT)
+ & CPER_ARM_ERR_PARTICIPATION_TYPE_MASK);
+ if (participation_type < ARRAY_SIZE(arm_bus_err_part_type_strs)) {
+ printk("%sparticipation type: %s\n", pfx,
+ arm_bus_err_part_type_strs[participation_type]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_TIME_OUT) {
+ time_out = ((error_info >> CPER_ARM_ERR_TIME_OUT_SHIFT)
+ & CPER_ARM_ERR_TIME_OUT_MASK);
+ if (time_out)
+ printk("%srequest timed out\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_ADDRESS_SPACE) {
+ address_space = ((error_info >> CPER_ARM_ERR_ADDRESS_SPACE_SHIFT)
+ & CPER_ARM_ERR_ADDRESS_SPACE_MASK);
+ if (address_space < ARRAY_SIZE(arm_bus_err_addr_space_strs)) {
+ printk("%saddress space: %s\n", pfx,
+ arm_bus_err_addr_space_strs[address_space]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_MEM_ATTRIBUTES) {
+ mem_attributes = ((error_info >> CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT)
+ & CPER_ARM_ERR_MEM_ATTRIBUTES_MASK);
+ printk("%smemory access attributes:0x%x\n", pfx, mem_attributes);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_ACCESS_MODE) {
+ access_mode = ((error_info >> CPER_ARM_ERR_ACCESS_MODE_SHIFT)
+ & CPER_ARM_ERR_ACCESS_MODE_MASK);
+ if (access_mode)
+ printk("%saccess mode: normal\n", pfx);
+ else
+ printk("%saccess mode: secure\n", pfx);
+ }
+}
+
+void cper_print_proc_arm(const char *pfx,
+ const struct cper_sec_proc_arm *proc)
+{
+ int i, len, max_ctx_type;
+ struct cper_arm_err_info *err_info;
+ struct cper_arm_ctx_info *ctx_info;
+ char newpfx[64], infopfx[64];
+
+ printk("%sMIDR: 0x%016llx\n", pfx, proc->midr);
+
+ len = proc->section_length - (sizeof(*proc) +
+ proc->err_info_num * (sizeof(*err_info)));
+ if (len < 0) {
+ printk("%ssection length: %d\n", pfx, proc->section_length);
+ printk("%ssection length is too small\n", pfx);
+ printk("%sfirmware-generated error record is incorrect\n", pfx);
+ printk("%sERR_INFO_NUM is %d\n", pfx, proc->err_info_num);
+ return;
+ }
+
+ if (proc->validation_bits & CPER_ARM_VALID_MPIDR)
+ printk("%sMultiprocessor Affinity Register (MPIDR): 0x%016llx\n",
+ pfx, proc->mpidr);
+
+ if (proc->validation_bits & CPER_ARM_VALID_AFFINITY_LEVEL)
+ printk("%serror affinity level: %d\n", pfx,
+ proc->affinity_level);
+
+ if (proc->validation_bits & CPER_ARM_VALID_RUNNING_STATE) {
+ printk("%srunning state: 0x%x\n", pfx, proc->running_state);
+ printk("%sPower State Coordination Interface state: %d\n",
+ pfx, proc->psci_state);
+ }
+
+ snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
+
+ err_info = (struct cper_arm_err_info *)(proc + 1);
+ for (i = 0; i < proc->err_info_num; i++) {
+ printk("%sError info structure %d:\n", pfx, i);
+
+ printk("%snum errors: %d\n", pfx, err_info->multiple_error + 1);
+
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_FLAGS) {
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_FIRST)
+ printk("%sfirst error captured\n", newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_LAST)
+ printk("%slast error captured\n", newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_PROPAGATED)
+ printk("%spropagated error captured\n",
+ newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_OVERFLOW)
+ printk("%soverflow occurred, error info is incomplete\n",
+ newpfx);
+ }
+
+ printk("%serror_type: %d, %s\n", newpfx, err_info->type,
+ err_info->type < ARRAY_SIZE(cper_proc_error_type_strs) ?
+ cper_proc_error_type_strs[err_info->type] : "unknown");
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_ERR_INFO) {
+ printk("%serror_info: 0x%016llx\n", newpfx,
+ err_info->error_info);
+ snprintf(infopfx, sizeof(infopfx), "%s%s", newpfx, INDENT_SP);
+ cper_print_arm_err_info(infopfx, err_info->type,
+ err_info->error_info);
+ }
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_VIRT_ADDR)
+ printk("%svirtual fault address: 0x%016llx\n",
+ newpfx, err_info->virt_fault_addr);
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR)
+ printk("%sphysical fault address: 0x%016llx\n",
+ newpfx, err_info->physical_fault_addr);
+ err_info += 1;
+ }
+
+ ctx_info = (struct cper_arm_ctx_info *)err_info;
+ max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
+ for (i = 0; i < proc->context_info_num; i++) {
+ int size = sizeof(*ctx_info) + ctx_info->size;
+
+ printk("%sContext info structure %d:\n", pfx, i);
+ if (len < size) {
+ printk("%ssection length is too small\n", newpfx);
+ printk("%sfirmware-generated error record is incorrect\n", pfx);
+ return;
+ }
+ if (ctx_info->type > max_ctx_type) {
+ printk("%sInvalid context type: %d (max: %d)\n",
+ newpfx, ctx_info->type, max_ctx_type);
+ return;
+ }
+ printk("%sregister context type: %s\n", newpfx,
+ arm_reg_ctx_strs[ctx_info->type]);
+ print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4,
+ (ctx_info + 1), ctx_info->size, 0);
+ len -= size;
+ ctx_info = (struct cper_arm_ctx_info *)((long)ctx_info + size);
+ }
+
+ if (len > 0) {
+ printk("%sVendor specific error info has %u bytes:\n", pfx,
+ len);
+ print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, ctx_info,
+ len, true);
+ }
+}
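
Nearly every field in cper-arm.c is decoded with the same idiom: test a validation bit, then extract the field with a shift and mask and bounds-check it against a string table. A minimal standalone version of that idiom (bit positions invented here, not the CPER layout):

    #include <stdint.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    #define VALID_TRANSACTION_TYPE (1ULL << 0)    /* invented positions */
    #define TRANSACTION_SHIFT      16
    #define TRANSACTION_MASK       0x3ULL

    static const char * const trans_strs[] = {
        "Instruction", "Data Access", "Generic",
    };

    static void decode(uint64_t error_info)
    {
        if (error_info & VALID_TRANSACTION_TYPE) {
            uint64_t t = (error_info >> TRANSACTION_SHIFT) & TRANSACTION_MASK;

            if (t < ARRAY_SIZE(trans_strs))    /* bounds-check before indexing */
                printf("transaction type: %s\n", trans_strs[t]);
        }
    }

    int main(void)
    {
        decode(VALID_TRANSACTION_TYPE | (1ULL << TRANSACTION_SHIFT));
        return 0;    /* prints "transaction type: Data Access" */
    }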
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index d2fcafc..c165933 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -122,7 +122,7 @@ static const char * const proc_isa_strs[] = {
"ARM A64",
};
-static const char * const proc_error_type_strs[] = {
+const char * const cper_proc_error_type_strs[] = {
"cache error",
"TLB error",
"bus error",
@@ -157,8 +157,8 @@ static void cper_print_proc_generic(const char *pfx,
if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) {
printk("%s""error_type: 0x%02x\n", pfx, proc->proc_error_type);
cper_print_bits(pfx, proc->proc_error_type,
- proc_error_type_strs,
- ARRAY_SIZE(proc_error_type_strs));
+ cper_proc_error_type_strs,
+ ARRAY_SIZE(cper_proc_error_type_strs));
}
if (proc->validation_bits & CPER_PROC_VALID_OPERATION)
printk("%s""operation: %d, %s\n", pfx, proc->operation,
@@ -188,122 +188,6 @@ static void cper_print_proc_generic(const char *pfx,
printk("%s""IP: 0x%016llx\n", pfx, proc->ip);
}
-#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
-static const char * const arm_reg_ctx_strs[] = {
- "AArch32 general purpose registers",
- "AArch32 EL1 context registers",
- "AArch32 EL2 context registers",
- "AArch32 secure context registers",
- "AArch64 general purpose registers",
- "AArch64 EL1 context registers",
- "AArch64 EL2 context registers",
- "AArch64 EL3 context registers",
- "Misc. system register structure",
-};
-
-static void cper_print_proc_arm(const char *pfx,
- const struct cper_sec_proc_arm *proc)
-{
- int i, len, max_ctx_type;
- struct cper_arm_err_info *err_info;
- struct cper_arm_ctx_info *ctx_info;
- char newpfx[64];
-
- printk("%sMIDR: 0x%016llx\n", pfx, proc->midr);
-
- len = proc->section_length - (sizeof(*proc) +
- proc->err_info_num * (sizeof(*err_info)));
- if (len < 0) {
- printk("%ssection length: %d\n", pfx, proc->section_length);
- printk("%ssection length is too small\n", pfx);
- printk("%sfirmware-generated error record is incorrect\n", pfx);
- printk("%sERR_INFO_NUM is %d\n", pfx, proc->err_info_num);
- return;
- }
-
- if (proc->validation_bits & CPER_ARM_VALID_MPIDR)
- printk("%sMultiprocessor Affinity Register (MPIDR): 0x%016llx\n",
- pfx, proc->mpidr);
-
- if (proc->validation_bits & CPER_ARM_VALID_AFFINITY_LEVEL)
- printk("%serror affinity level: %d\n", pfx,
- proc->affinity_level);
-
- if (proc->validation_bits & CPER_ARM_VALID_RUNNING_STATE) {
- printk("%srunning state: 0x%x\n", pfx, proc->running_state);
- printk("%sPower State Coordination Interface state: %d\n",
- pfx, proc->psci_state);
- }
-
- snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
-
- err_info = (struct cper_arm_err_info *)(proc + 1);
- for (i = 0; i < proc->err_info_num; i++) {
- printk("%sError info structure %d:\n", pfx, i);
-
- printk("%snum errors: %d\n", pfx, err_info->multiple_error + 1);
-
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_FLAGS) {
- if (err_info->flags & CPER_ARM_INFO_FLAGS_FIRST)
- printk("%sfirst error captured\n", newpfx);
- if (err_info->flags & CPER_ARM_INFO_FLAGS_LAST)
- printk("%slast error captured\n", newpfx);
- if (err_info->flags & CPER_ARM_INFO_FLAGS_PROPAGATED)
- printk("%spropagated error captured\n",
- newpfx);
- if (err_info->flags & CPER_ARM_INFO_FLAGS_OVERFLOW)
- printk("%soverflow occurred, error info is incomplete\n",
- newpfx);
- }
-
- printk("%serror_type: %d, %s\n", newpfx, err_info->type,
- err_info->type < ARRAY_SIZE(proc_error_type_strs) ?
- proc_error_type_strs[err_info->type] : "unknown");
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_ERR_INFO)
- printk("%serror_info: 0x%016llx\n", newpfx,
- err_info->error_info);
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_VIRT_ADDR)
- printk("%svirtual fault address: 0x%016llx\n",
- newpfx, err_info->virt_fault_addr);
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR)
- printk("%sphysical fault address: 0x%016llx\n",
- newpfx, err_info->physical_fault_addr);
- err_info += 1;
- }
-
- ctx_info = (struct cper_arm_ctx_info *)err_info;
- max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
- for (i = 0; i < proc->context_info_num; i++) {
- int size = sizeof(*ctx_info) + ctx_info->size;
-
- printk("%sContext info structure %d:\n", pfx, i);
- if (len < size) {
- printk("%ssection length is too small\n", newpfx);
- printk("%sfirmware-generated error record is incorrect\n", pfx);
- return;
- }
- if (ctx_info->type > max_ctx_type) {
- printk("%sInvalid context type: %d (max: %d)\n",
- newpfx, ctx_info->type, max_ctx_type);
- return;
- }
- printk("%sregister context type: %s\n", newpfx,
- arm_reg_ctx_strs[ctx_info->type]);
- print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4,
- (ctx_info + 1), ctx_info->size, 0);
- len -= size;
- ctx_info = (struct cper_arm_ctx_info *)((long)ctx_info + size);
- }
-
- if (len > 0) {
- printk("%sVendor specific error info has %u bytes:\n", pfx,
- len);
- print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, ctx_info,
- len, true);
- }
-}
-#endif
-
static const char * const mem_err_type_strs[] = {
"unknown",
"no error",
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 557a478..8ce70c2 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -608,7 +608,7 @@ static int __init efi_load_efivars(void)
return 0;
pdev = platform_device_register_simple("efivars", 0, NULL, 0);
- return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
+ return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(efi_load_efivars);
#endif
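PTR_ERR_OR_ZERO() from <linux/err.h> is the canonical spelling of the ternary being replaced: it returns PTR_ERR(p) when p is an error pointer and 0 otherwise. A minimal sketch of the idiom (the device name is hypothetical):

#include <linux/err.h>
#include <linux/platform_device.h>

static int register_example(void)
{
	struct platform_device *pdev =
		platform_device_register_simple("example", -1, NULL, 0);

	/* Equivalent to: IS_ERR(pdev) ? PTR_ERR(pdev) : 0 */
	return PTR_ERR_OR_ZERO(pdev);
}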
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index f3f4f81..bb1c068 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -77,8 +77,8 @@ static int psci_ops_check(void)
return 0;
}
-static int find_clusters(const struct cpumask *cpus,
- const struct cpumask **clusters)
+static int find_cpu_groups(const struct cpumask *cpus,
+ const struct cpumask **cpu_groups)
{
unsigned int nb = 0;
cpumask_var_t tmp;
@@ -88,11 +88,11 @@ static int find_clusters(const struct cpumask *cpus,
cpumask_copy(tmp, cpus);
while (!cpumask_empty(tmp)) {
- const struct cpumask *cluster =
+ const struct cpumask *cpu_group =
topology_core_cpumask(cpumask_any(tmp));
- clusters[nb++] = cluster;
- cpumask_andnot(tmp, tmp, cluster);
+ cpu_groups[nb++] = cpu_group;
+ cpumask_andnot(tmp, tmp, cpu_group);
}
free_cpumask_var(tmp);
@@ -170,24 +170,24 @@ static int hotplug_tests(void)
{
int err;
cpumask_var_t offlined_cpus;
- int i, nb_cluster;
- const struct cpumask **clusters;
+ int i, nb_cpu_group;
+ const struct cpumask **cpu_groups;
char *page_buf;
err = -ENOMEM;
if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
return err;
- /* We may have up to nb_available_cpus clusters. */
- clusters = kmalloc_array(nb_available_cpus, sizeof(*clusters),
- GFP_KERNEL);
- if (!clusters)
+ /* We may have up to nb_available_cpus cpu_groups. */
+ cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
+ GFP_KERNEL);
+ if (!cpu_groups)
goto out_free_cpus;
page_buf = (char *)__get_free_page(GFP_KERNEL);
if (!page_buf)
- goto out_free_clusters;
+ goto out_free_cpu_groups;
err = 0;
- nb_cluster = find_clusters(cpu_online_mask, clusters);
+ nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
/*
* Of course the last CPU cannot be powered down and cpu_down() should
@@ -197,24 +197,22 @@ static int hotplug_tests(void)
err += down_and_up_cpus(cpu_online_mask, offlined_cpus);
/*
- * Take down CPUs by cluster this time. When the last CPU is turned
- * off, the cluster itself should shut down.
+ * Take down CPUs by cpu group this time. When the last CPU is turned
+ * off, the cpu group itself should shut down.
*/
- for (i = 0; i < nb_cluster; ++i) {
- int cluster_id =
- topology_physical_package_id(cpumask_any(clusters[i]));
+ for (i = 0; i < nb_cpu_group; ++i) {
ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
- clusters[i]);
+ cpu_groups[i]);
/* Remove trailing newline. */
page_buf[len - 1] = '\0';
- pr_info("Trying to turn off and on again cluster %d "
- "(CPUs %s)\n", cluster_id, page_buf);
- err += down_and_up_cpus(clusters[i], offlined_cpus);
+ pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
+ i, page_buf);
+ err += down_and_up_cpus(cpu_groups[i], offlined_cpus);
}
free_page((unsigned long)page_buf);
-out_free_clusters:
- kfree(clusters);
+out_free_cpu_groups:
+ kfree(cpu_groups);
out_free_cpus:
free_cpumask_var(offlined_cpus);
return err;
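The renamed helper partitions a cpumask into disjoint groups by repeatedly taking topology_core_cpumask() of an arbitrary remaining CPU and subtracting that group, which is why nb_available_cpus is a safe upper bound on the group count. A minimal sketch of the pattern:

static int count_cpu_groups(const struct cpumask *cpus)
{
	cpumask_var_t tmp;
	int nb = 0;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(tmp, cpus);
	while (!cpumask_empty(tmp)) {
		const struct cpumask *grp =
			topology_core_cpumask(cpumask_any(tmp));

		cpumask_andnot(tmp, tmp, grp);	/* drop the whole group */
		nb++;
	}

	free_cpumask_var(tmp);
	return nb;	/* worst case: one group per CPU */
}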
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index dfcf56e..76861a0 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -522,6 +522,7 @@ static struct of_device_id const bcm_kona_gpio_of_match[] = {
* category than their parents, so it won't report false recursion.
*/
static struct lock_class_key gpio_lock_class;
+static struct lock_class_key gpio_request_class;
static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
@@ -531,7 +532,7 @@ static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq,
ret = irq_set_chip_data(irq, d->host_data);
if (ret < 0)
return ret;
- irq_set_lockdep_class(irq, &gpio_lock_class);
+ irq_set_lockdep_class(irq, &gpio_lock_class, &gpio_request_class);
irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip, handle_simple_irq);
irq_set_noprobe(irq);
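This and the following GPIO hunks all adapt to the same interface change: irq_set_lockdep_class() now takes two lockdep keys, one for the irq_desc lock and one for the request mutex, so drivers declare a static key pair. A sketch of the resulting map hook (my_irq_chip is an assumed driver-local irq_chip):

static struct lock_class_key my_gpio_lock_class;	/* irq_desc->lock */
static struct lock_class_key my_gpio_request_class;	/* request mutex */

static int my_gpio_irq_map(struct irq_domain *d, unsigned int irq,
			   irq_hw_number_t hwirq)
{
	int ret = irq_set_chip_data(irq, d->host_data);

	if (ret < 0)
		return ret;
	irq_set_lockdep_class(irq, &my_gpio_lock_class,
			      &my_gpio_request_class);
	irq_set_chip_and_handler(irq, &my_irq_chip, handle_simple_irq);
	irq_set_noprobe(irq);
	return 0;
}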
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 545d43a..bb4f8cf 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -327,6 +327,7 @@ static struct brcmstb_gpio_bank *brcmstb_gpio_hwirq_to_bank(
* category than their parents, so it won't report false recursion.
*/
static struct lock_class_key brcmstb_gpio_irq_lock_class;
+static struct lock_class_key brcmstb_gpio_irq_request_class;
static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
@@ -346,7 +347,8 @@ static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
ret = irq_set_chip_data(irq, &bank->gc);
if (ret < 0)
return ret;
- irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class);
+ irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class,
+ &brcmstb_gpio_irq_request_class);
irq_set_chip_and_handler(irq, &priv->irq_chip, handle_level_irq);
irq_set_noprobe(irq);
return 0;
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index dd67a31..c38624e 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/init.h>
@@ -380,9 +381,16 @@ static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
}
}
+static const char *mrfld_gpio_get_pinctrl_dev_name(void)
+{
+ const char *dev_name = acpi_dev_get_first_match_name("INTC1002", NULL, -1);
+ return dev_name ? dev_name : "pinctrl-merrifield";
+}
+
static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const struct mrfld_gpio_pinrange *range;
+ const char *pinctrl_dev_name;
struct mrfld_gpio *priv;
u32 gpio_base, irq_base;
void __iomem *base;
@@ -439,10 +447,11 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
return retval;
}
+ pinctrl_dev_name = mrfld_gpio_get_pinctrl_dev_name();
for (i = 0; i < ARRAY_SIZE(mrfld_gpio_ranges); i++) {
range = &mrfld_gpio_ranges[i];
retval = gpiochip_add_pin_range(&priv->chip,
- "pinctrl-merrifield",
+ pinctrl_dev_name,
range->gpio_base,
range->pin_base,
range->npins);
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index f9042bc..7b14d62 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -152,14 +152,13 @@ static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
{
unsigned long get_mask = 0;
unsigned long set_mask = 0;
- int bit = 0;
- while ((bit = find_next_bit(mask, gc->ngpio, bit)) != gc->ngpio) {
- if (gc->bgpio_dir & BIT(bit))
- set_mask |= BIT(bit);
- else
- get_mask |= BIT(bit);
- }
+ /* Clear the requested bits up front so lines that read back as zero are reported as zero */
+ *bits &= ~*mask;
+
+ /* Exploit the fact that we know which directions are set */
+ set_mask = *mask & gc->bgpio_dir;
+ get_mask = *mask & ~gc->bgpio_dir;
if (set_mask)
*bits |= gc->read_reg(gc->reg_set) & set_mask;
@@ -176,13 +175,13 @@ static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
/*
* This only works if the bits in the GPIO register are in native endianness.
- * It is dirt simple and fast in this case. (Also the most common case.)
*/
static int bgpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
-
- *bits = gc->read_reg(gc->reg_dat) & *mask;
+ /* Clear the requested bits up front so lines that read back as zero are reported as zero */
+ *bits &= ~*mask;
+ *bits |= gc->read_reg(gc->reg_dat) & *mask;
return 0;
}
@@ -196,9 +195,12 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
unsigned long val;
int bit;
+ /* Clear the requested bits up front so lines that read back as zero are reported as zero */
+ *bits &= ~*mask;
+
/* Create a mirrored mask */
- bit = 0;
- while ((bit = find_next_bit(mask, gc->ngpio, bit)) != gc->ngpio)
+ bit = -1;
+ while ((bit = find_next_bit(mask, gc->ngpio, bit + 1)) < gc->ngpio)
readmask |= bgpio_line2mask(gc, bit);
/* Read the register */
@@ -208,8 +210,8 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
* Mirror the result into the "bits" result, this will give line 0
* in bit 0 ... line 31 in bit 31 for a 32bit register.
*/
- bit = 0;
- while ((bit = find_next_bit(&val, gc->ngpio, bit)) != gc->ngpio)
+ bit = -1;
+ while ((bit = find_next_bit(&val, gc->ngpio, bit + 1)) < gc->ngpio)
*bits |= bgpio_line2mask(gc, bit);
return 0;
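Two fixes are folded into the gpio-mmio hunks. First, the get_multiple paths now clear the requested bits in *bits before OR-ing in hardware state, so stale buffer contents from the caller cannot leak through for lines that read back as zero. Second, the old loops could never terminate: find_next_bit(addr, size, offset) may return offset itself, so restarting the scan at the bit just found spins forever once any bit is set. A sketch of the corrected iteration:

static unsigned long mirror_mask(const unsigned long *mask,
				 unsigned int ngpio)
{
	unsigned long readmask = 0;
	int bit;

	/* Broken form (for contrast):
	 *	bit = 0;
	 *	while ((bit = find_next_bit(mask, ngpio, bit)) != ngpio)
	 *		readmask |= BIT(bit);
	 * never advances once a bit is found.
	 */

	/* Fixed: resume the search one past the bit just found. */
	bit = -1;
	while ((bit = find_next_bit(mask, ngpio, bit + 1)) < ngpio)
		readmask |= BIT(bit);

	/* for_each_set_bit(bit, mask, ngpio) is the equivalent idiom. */
	return readmask;
}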
diff --git a/drivers/gpio/gpio-reg.c b/drivers/gpio/gpio-reg.c
index 23e771d..e85903e 100644
--- a/drivers/gpio/gpio-reg.c
+++ b/drivers/gpio/gpio-reg.c
@@ -103,8 +103,8 @@ static int gpio_reg_to_irq(struct gpio_chip *gc, unsigned offset)
struct gpio_reg *r = to_gpio_reg(gc);
int irq = r->irqs[offset];
- if (irq >= 0 && r->irq.domain)
- irq = irq_find_mapping(r->irq.domain, irq);
+ if (irq >= 0 && r->irqdomain)
+ irq = irq_find_mapping(r->irqdomain, irq);
return irq;
}
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 8db47f6..02fa8fe 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -565,6 +565,7 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
* than their parents, so it won't report false recursion.
*/
static struct lock_class_key gpio_lock_class;
+static struct lock_class_key gpio_request_class;
static int tegra_gpio_probe(struct platform_device *pdev)
{
@@ -670,7 +671,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
bank = &tgi->bank_info[GPIO_BANK(gpio)];
- irq_set_lockdep_class(irq, &gpio_lock_class);
+ irq_set_lockdep_class(irq, &gpio_lock_class,
+ &gpio_request_class);
irq_set_chip_data(irq, bank);
irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
}
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 2313af8..acd5911 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -139,7 +139,7 @@ static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
struct irq_data *irq_data,
- bool early)
+ bool reserve)
{
struct xgene_gpio_sb *priv = d->host_data;
u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index eb4528c..d6f3d9e 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1074,7 +1074,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
}
if (!chip->names)
- devprop_gpiochip_set_names(chip);
+ devprop_gpiochip_set_names(chip, dev_fwnode(chip->parent));
acpi_gpiochip_request_regions(acpi_gpio);
acpi_gpiochip_scan_gpios(acpi_gpio);
diff --git a/drivers/gpio/gpiolib-devprop.c b/drivers/gpio/gpiolib-devprop.c
index 27f383b..f748aa3 100644
--- a/drivers/gpio/gpiolib-devprop.c
+++ b/drivers/gpio/gpiolib-devprop.c
@@ -19,30 +19,27 @@
/**
* devprop_gpiochip_set_names - Set GPIO line names using device properties
* @chip: GPIO chip whose lines should be named, if possible
+ * @fwnode: firmware node containing the gpio-line-names property
*
* Looks for device property "gpio-line-names" and if it exists assigns
* GPIO line names for the chip. The memory allocated for the assigned
* names belongs to the underlying firmware node and should not be released
* by the caller.
*/
-void devprop_gpiochip_set_names(struct gpio_chip *chip)
+void devprop_gpiochip_set_names(struct gpio_chip *chip,
+ const struct fwnode_handle *fwnode)
{
struct gpio_device *gdev = chip->gpiodev;
const char **names;
int ret, i;
- if (!chip->parent) {
- dev_warn(&gdev->dev, "GPIO chip parent is NULL\n");
- return;
- }
-
- ret = device_property_read_string_array(chip->parent, "gpio-line-names",
+ ret = fwnode_property_read_string_array(fwnode, "gpio-line-names",
NULL, 0);
if (ret < 0)
return;
if (ret != gdev->ngpio) {
- dev_warn(chip->parent,
+ dev_warn(&gdev->dev,
"names %d do not match number of GPIOs %d\n", ret,
gdev->ngpio);
return;
@@ -52,10 +49,10 @@ void devprop_gpiochip_set_names(struct gpio_chip *chip)
if (!names)
return;
- ret = device_property_read_string_array(chip->parent, "gpio-line-names",
+ ret = fwnode_property_read_string_array(fwnode, "gpio-line-names",
names, gdev->ngpio);
if (ret < 0) {
- dev_warn(chip->parent, "failed to read GPIO line names\n");
+ dev_warn(&gdev->dev, "failed to read GPIO line names\n");
kfree(names);
return;
}
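fwnode_property_read_string_array() follows the kernel's usual two-call contract: called with a NULL buffer it returns the element count; a second call fills a buffer of that size. The strings themselves stay owned by the firmware node. A condensed sketch of the pattern used above:

static const char **read_line_names(const struct fwnode_handle *fwnode,
				    int expected)
{
	const char **names;
	int count;

	count = fwnode_property_read_string_array(fwnode, "gpio-line-names",
						  NULL, 0);
	if (count < 0 || count != expected)
		return NULL;	/* absent, or wrong number of entries */

	names = kcalloc(count, sizeof(*names), GFP_KERNEL);
	if (!names)
		return NULL;

	if (fwnode_property_read_string_array(fwnode, "gpio-line-names",
					      names, count) < 0) {
		kfree(names);
		return NULL;
	}

	return names;	/* free with kfree(); strings belong to the fwnode */
}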
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index e0d59e6..72a0695 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -493,7 +493,8 @@ int of_gpiochip_add(struct gpio_chip *chip)
/* If the chip defines names itself, these take precedence */
if (!chip->names)
- devprop_gpiochip_set_names(chip);
+ devprop_gpiochip_set_names(chip,
+ of_fwnode_handle(chip->of_node));
of_node_get(chip->of_node);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index aad84a6..14532d9 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -73,7 +73,8 @@ LIST_HEAD(gpio_devices);
static void gpiochip_free_hogs(struct gpio_chip *chip);
static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
- struct lock_class_key *key);
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key);
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip);
static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip);
@@ -1100,7 +1101,8 @@ static void gpiochip_setup_devs(void)
}
int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
- struct lock_class_key *key)
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key)
{
unsigned long flags;
int status = 0;
@@ -1246,7 +1248,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
if (status)
goto err_remove_from_list;
- status = gpiochip_add_irqchip(chip, key);
+ status = gpiochip_add_irqchip(chip, lock_key, request_key);
if (status)
goto err_remove_chip;
@@ -1632,7 +1634,7 @@ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
* This lock class tells lockdep that GPIO irqs are in a different
* category than their parents, so it won't report false recursion.
*/
- irq_set_lockdep_class(irq, chip->irq.lock_key);
+ irq_set_lockdep_class(irq, chip->irq.lock_key, chip->irq.request_key);
irq_set_chip_and_handler(irq, chip->irq.chip, chip->irq.handler);
/* Chips that use nested thread handlers have them marked */
if (chip->irq.threaded)
@@ -1712,10 +1714,12 @@ static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
/**
* gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip
* @gpiochip: the GPIO chip to add the IRQ chip to
- * @lock_key: lockdep class
+ * @lock_key: lockdep class for IRQ lock
+ * @request_key: lockdep class for IRQ request
*/
static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
- struct lock_class_key *lock_key)
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key)
{
struct irq_chip *irqchip = gpiochip->irq.chip;
const struct irq_domain_ops *ops;
@@ -1753,6 +1757,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
gpiochip->to_irq = gpiochip_to_irq;
gpiochip->irq.default_type = type;
gpiochip->irq.lock_key = lock_key;
+ gpiochip->irq.request_key = request_key;
if (gpiochip->irq.domain_ops)
ops = gpiochip->irq.domain_ops;
@@ -1850,7 +1855,8 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
* @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
* to have the core avoid setting up any default type in the hardware.
* @threaded: whether this irqchip uses a nested thread handler
- * @lock_key: lockdep class
+ * @lock_key: lockdep class for IRQ lock
+ * @request_key: lockdep class for IRQ request
*
* This function closely associates a certain irqchip with a certain
* gpiochip, providing an irq domain to translate the local IRQs to
@@ -1872,7 +1878,8 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
irq_flow_handler_t handler,
unsigned int type,
bool threaded,
- struct lock_class_key *lock_key)
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key)
{
struct device_node *of_node;
@@ -1913,6 +1920,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
gpiochip->irq.default_type = type;
gpiochip->to_irq = gpiochip_to_irq;
gpiochip->irq.lock_key = lock_key;
+ gpiochip->irq.request_key = request_key;
gpiochip->irq.domain = irq_domain_add_simple(of_node,
gpiochip->ngpio, first_irq,
&gpiochip_domain_ops, gpiochip);
@@ -1940,7 +1948,8 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
#else /* CONFIG_GPIOLIB_IRQCHIP */
static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
- struct lock_class_key *key)
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key)
{
return 0;
}
@@ -2884,6 +2893,27 @@ void gpiod_set_raw_value(struct gpio_desc *desc, int value)
EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
/**
+ * gpiod_set_value_nocheck() - set a GPIO line value without checking
+ * @desc: the descriptor to set the value on
+ * @value: value to set
+ *
+ * This sets the value of a GPIO line backing a descriptor, applying
+ * different semantic quirks like active low and open drain/source
+ * handling.
+ */
+static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
+{
+ if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ value = !value;
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+ gpio_set_open_drain_value_commit(desc, value);
+ else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+ gpio_set_open_source_value_commit(desc, value);
+ else
+ gpiod_set_raw_value_commit(desc, value);
+}
+
+/**
* gpiod_set_value() - assign a gpio's value
* @desc: gpio whose value will be assigned
* @value: value to assign
@@ -2897,16 +2927,8 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
void gpiod_set_value(struct gpio_desc *desc, int value)
{
VALIDATE_DESC_VOID(desc);
- /* Should be using gpiod_set_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
- value = !value;
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- gpio_set_open_drain_value_commit(desc, value);
- else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- gpio_set_open_source_value_commit(desc, value);
- else
- gpiod_set_raw_value_commit(desc, value);
+ gpiod_set_value_nocheck(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_value);
@@ -3234,9 +3256,7 @@ void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
might_sleep_if(extra_checks);
VALIDATE_DESC_VOID(desc);
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
- value = !value;
- gpiod_set_raw_value_commit(desc, value);
+ gpiod_set_value_nocheck(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
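Besides removing duplication, routing both setters through gpiod_set_value_nocheck() fixes an asymmetry: gpiod_set_value_cansleep() previously applied only the active-low inversion, so open-drain and open-source descriptors behaved differently depending on which variant the consumer called. Consumer-side usage is unchanged; a brief sketch:

/* Hypothetical consumer code. */
void example_blink(struct gpio_desc *desc)
{
	/* Atomic-safe fast path; WARNs if the chip can sleep. */
	gpiod_set_value(desc, 1);

	/* Sleeping path, e.g. for I2C/SPI GPIO expanders; it now
	 * honours open-drain/open-source flags exactly like the
	 * fast path does. */
	gpiod_set_value_cansleep(desc, 0);
}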
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index af48322..6c44d16 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -228,7 +228,8 @@ static inline int gpio_chip_hwgpio(const struct gpio_desc *desc)
return desc - &desc->gdev->descs[0];
}
-void devprop_gpiochip_set_names(struct gpio_chip *chip);
+void devprop_gpiochip_set_names(struct gpio_chip *chip,
+ const struct fwnode_handle *fwnode);
/* With descriptor prefix */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index da43813..5aeb5f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2467,7 +2467,7 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
- PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f71fe6d..bb5fa89 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2336,7 +2336,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
const struct dm_connector_state *dm_state)
{
struct drm_display_mode *preferred_mode = NULL;
- const struct drm_connector *drm_connector;
+ struct drm_connector *drm_connector;
struct dc_stream_state *stream = NULL;
struct drm_display_mode mode = *drm_mode;
bool native_mode_found = false;
@@ -2355,11 +2355,13 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (!aconnector->dc_sink) {
/*
- * Exclude MST from creating fake_sink
- * TODO: need to enable MST into fake_sink feature
+ * Create a dc_sink when needed for MST;
+ * don't apply fake_sink to MST.
*/
- if (aconnector->mst_port)
- goto stream_create_fail;
+ if (aconnector->mst_port) {
+ dm_dp_mst_dc_sink_create(drm_connector);
+ goto mst_dc_sink_create_done;
+ }
if (create_fake_sink(aconnector))
goto stream_create_fail;
@@ -2410,6 +2412,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
stream_create_fail:
dm_state_null:
drm_connector_null:
+mst_dc_sink_create_done:
return stream;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 117521c..0230250 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -189,6 +189,8 @@ struct amdgpu_dm_connector {
struct mutex hpd_lock;
bool fake_enable;
+
+ bool mst_connected;
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index f8efb98..638c2c2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -185,6 +185,42 @@ static int dm_connector_update_modes(struct drm_connector *connector,
return ret;
}
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct edid *edid;
+ struct dc_sink *dc_sink;
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+ .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+
+ edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
+
+ if (!edid) {
+ drm_mode_connector_update_edid_property(
+ &aconnector->base,
+ NULL);
+ return;
+ }
+
+ aconnector->edid = edid;
+
+ dc_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+ (uint8_t *)aconnector->edid,
+ (aconnector->edid->extensions + 1) * EDID_LENGTH,
+ &init_params);
+
+ dc_sink->priv = aconnector;
+ aconnector->dc_sink = dc_sink;
+
+ amdgpu_dm_add_sink_to_freesync_module(
+ connector, aconnector->edid);
+
+ drm_mode_connector_update_edid_property(
+ &aconnector->base, aconnector->edid);
+}
+
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -311,6 +347,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_mode_connector_set_path_property(connector, pathprop);
drm_connector_list_iter_end(&conn_iter);
+ aconnector->mst_connected = true;
return &aconnector->base;
}
}
@@ -363,6 +400,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
*/
amdgpu_dm_connector_funcs_reset(connector);
+ aconnector->mst_connected = true;
+
DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
aconnector, connector->base.id, aconnector->mst_port);
@@ -394,6 +433,8 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_mode_connector_update_edid_property(
&aconnector->base,
NULL);
+
+ aconnector->mst_connected = false;
}
static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -404,10 +445,18 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
drm_kms_helper_hotplug_event(dev);
}
+static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
+{
+ mutex_lock(&connector->dev->mode_config.mutex);
+ drm_mode_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+}
+
static void dm_dp_mst_register_connector(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
if (adev->mode_info.rfbdev)
drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
@@ -416,6 +465,8 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
drm_connector_register(connector);
+ if (aconnector->mst_connected)
+ dm_dp_mst_link_status_reset(connector);
}
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 2da851b..8cf51da 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -31,5 +31,6 @@ struct amdgpu_dm_connector;
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector);
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 3dce35e..b142629 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -900,6 +900,15 @@ bool dcn_validate_bandwidth(
v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps;
v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c;
v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c;
+ /*
+ * The spreadsheet model doesn't handle taps_c == 1 properly;
+ * force chroma to always be scaled so bandwidth
+ * validation passes.
+ */
+ if (v->override_hta_pschroma[input_idx] == 1)
+ v->override_hta_pschroma[input_idx] = 2;
+ if (v->override_vta_pschroma[input_idx] == 1)
+ v->override_vta_pschroma[input_idx] = 2;
v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor;
}
if (v->is_line_buffer_bpp_fixed == dcn_bw_yes)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index e27ed4a..42a111b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1801,7 +1801,7 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
link->link_enc->funcs->disable_output(link->link_enc, signal, link);
}
-bool dp_active_dongle_validate_timing(
+static bool dp_active_dongle_validate_timing(
const struct dc_crtc_timing *timing,
const struct dc_dongle_caps *dongle_caps)
{
@@ -1833,6 +1833,8 @@ bool dp_active_dongle_validate_timing(
/* Check Color Depth and Pixel Clock */
if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
required_pix_clk /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ required_pix_clk = required_pix_clk * 2 / 3;
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
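The dongle check scales the required pixel clock by the data actually carried per pixel relative to 4:4:4 (24 bpp at 8-bit depth): 4:2:0 carries 12 bpp, hence the divide by two, and the newly handled 4:2:2 carries 16 bpp, hence the 2/3 factor. For a 297 MHz 4:4:4 timing that works out to 148.5 MHz in 4:2:0 and 297 × 2/3 = 198 MHz in 4:2:2. The scaling in isolation (a sketch; the enum values are those used in the hunk above):

static u32 scale_pix_clk(u32 timing_pix_clk, enum dc_pixel_encoding encoding)
{
	u32 required_pix_clk = timing_pix_clk;	/* 4:4:4 baseline, 24 bpp */

	if (encoding == PIXEL_ENCODING_YCBCR420)
		required_pix_clk /= 2;				/* 12 bpp */
	else if (encoding == PIXEL_ENCODING_YCBCR422)
		required_pix_clk = required_pix_clk * 2 / 3;	/* 16 bpp */

	return required_pix_clk;
}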
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 07ff8d2..d844fad 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2866,16 +2866,19 @@ static void dce110_apply_ctx_for_surface(
int num_planes,
struct dc_state *context)
{
- int i, be_idx;
+ int i;
if (num_planes == 0)
return;
- be_idx = -1;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (stream == context->res_ctx.pipe_ctx[i].stream) {
- be_idx = context->res_ctx.pipe_ctx[i].stream_res.tg->inst;
- break;
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (stream == pipe_ctx->stream) {
+ if (!pipe_ctx->top_pipe &&
+ (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
}
}
@@ -2895,9 +2898,22 @@ static void dce110_apply_ctx_for_surface(
context->stream_count);
dce110_program_front_end_for_pipe(dc, pipe_ctx);
+
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
+
program_surface_visibility(dc, pipe_ctx);
}
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if ((stream == pipe_ctx->stream) &&
+ (!pipe_ctx->top_pipe) &&
+ (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
+ }
}
static void dce110_power_down_fe(struct dc *dc, int fe_idx)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 74e7c82..a9d55d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -159,11 +159,10 @@ bool dpp_get_optimal_number_of_taps(
scl_data->taps.h_taps = 1;
if (IDENTITY_RATIO(scl_data->ratios.vert))
scl_data->taps.v_taps = 1;
- /*
- * Spreadsheet doesn't handle taps_c is one properly,
- * need to force Chroma to always be scaled to pass
- * bandwidth validation.
- */
+ if (IDENTITY_RATIO(scl_data->ratios.horz_c))
+ scl_data->taps.h_taps_c = 1;
+ if (IDENTITY_RATIO(scl_data->ratios.vert_c))
+ scl_data->taps.v_taps_c = 1;
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index a9782b1..34daf89 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -1360,7 +1360,7 @@ void dpp1_cm_set_output_csc_adjustment(
void dpp1_cm_set_output_csc_default(
struct dpp *dpp_base,
- const struct default_adjustment *default_adjust);
+ enum dc_color_space colorspace);
void dpp1_cm_set_gamut_remap(
struct dpp *dpp,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 40627c2..ed1216b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -225,14 +225,13 @@ void dpp1_cm_set_gamut_remap(
void dpp1_cm_set_output_csc_default(
struct dpp *dpp_base,
- const struct default_adjustment *default_adjust)
+ enum dc_color_space colorspace)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
uint32_t ocsc_mode = 0;
- if (default_adjust != NULL) {
- switch (default_adjust->out_color_space) {
+ switch (colorspace) {
case COLOR_SPACE_SRGB:
case COLOR_SPACE_2020_RGB_FULLRANGE:
ocsc_mode = 0;
@@ -253,7 +252,6 @@ void dpp1_cm_set_output_csc_default(
case COLOR_SPACE_UNKNOWN:
default:
break;
- }
}
REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 961ad5c..05dc01e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2097,6 +2097,8 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
tbl_entry.color_space = color_space;
//tbl_entry.regval = matrix;
pipe_ctx->plane_res.dpp->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.dpp, &tbl_entry);
+ } else {
+ pipe_ctx->plane_res.dpp->funcs->opp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
}
}
static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 83a6846..9420dfb 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -64,7 +64,7 @@ struct dpp_funcs {
void (*opp_set_csc_default)(
struct dpp *dpp,
- const struct default_adjustment *default_adjust);
+ enum dc_color_space colorspace);
void (*opp_set_csc_adjustment)(
struct dpp *dpp,
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 2e065fa..a0f4d2a 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -168,16 +168,23 @@ static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
int x, int y)
{
+ const struct drm_format_info *format = fb->format;
+ unsigned int num_planes = format->num_planes;
u32 addr = drm_fb_obj(fb)->dev_addr;
- int num_planes = fb->format->num_planes;
int i;
if (num_planes > 3)
num_planes = 3;
- for (i = 0; i < num_planes; i++)
+ addrs[0] = addr + fb->offsets[0] + y * fb->pitches[0] +
+ x * format->cpp[0];
+
+ y /= format->vsub;
+ x /= format->hsub;
+
+ for (i = 1; i < num_planes; i++)
addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] +
- x * fb->format->cpp[i];
+ x * format->cpp[i];
for (; i < 3; i++)
addrs[i] = 0;
}
@@ -744,15 +751,14 @@ void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
if (plane->fb)
drm_framebuffer_put(plane->fb);
- /* Power down the Y/U/V FIFOs */
- sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
-
/* Power down most RAMs and FIFOs if this is the primary plane */
if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
- sram_para1 |= CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
- CFG_PDWN32x32 | CFG_PDWN64x66;
+ sram_para1 = CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+ CFG_PDWN32x32 | CFG_PDWN64x66;
dma_ctrl0_mask = CFG_GRA_ENA;
} else {
+ /* Power down the Y/U/V FIFOs */
+ sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
dma_ctrl0_mask = CFG_DMA_ENA;
}
@@ -1225,17 +1231,13 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
dcrtc);
- if (ret < 0) {
- kfree(dcrtc);
- return ret;
- }
+ if (ret < 0)
+ goto err_crtc;
if (dcrtc->variant->init) {
ret = dcrtc->variant->init(dcrtc, dev);
- if (ret) {
- kfree(dcrtc);
- return ret;
- }
+ if (ret)
+ goto err_crtc;
}
/* Ensure AXI pipeline is enabled */
@@ -1246,13 +1248,15 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
dcrtc->crtc.port = port;
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (!primary)
- return -ENOMEM;
+ if (!primary) {
+ ret = -ENOMEM;
+ goto err_crtc;
+ }
ret = armada_drm_plane_init(primary);
if (ret) {
kfree(primary);
- return ret;
+ goto err_crtc;
}
ret = drm_universal_plane_init(drm, &primary->base, 0,
@@ -1263,7 +1267,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
kfree(primary);
- return ret;
+ goto err_crtc;
}
ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
@@ -1282,6 +1286,9 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
err_crtc_init:
primary->base.funcs->destroy(&primary->base);
+err_crtc:
+ kfree(dcrtc);
+
return ret;
}
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index bab11f4..bfd3514 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -42,6 +42,8 @@ struct armada_plane_work {
};
struct armada_plane_state {
+ u16 src_x;
+ u16 src_y;
u32 src_hw;
u32 dst_hw;
u32 dst_yx;
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index b411b60..aba9476 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -99,6 +99,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
{
struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ const struct drm_format_info *format;
struct drm_rect src = {
.x1 = src_x,
.y1 = src_y,
@@ -117,7 +118,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
};
uint32_t val, ctrl0;
unsigned idx = 0;
- bool visible;
+ bool visible, fb_changed;
int ret;
trace_armada_ovl_plane_update(plane, crtc, fb,
@@ -138,6 +139,18 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
if (!visible)
ctrl0 &= ~CFG_DMA_ENA;
+ /*
+ * Shifting a YUV packed format image by one pixel causes the U/V
+ * planes to swap. Compensate for it by also toggling the UV swap.
+ */
+ format = fb->format;
+ if (format->num_planes == 1 && src.x1 >> 16 & (format->hsub - 1))
+ ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
+
+ fb_changed = plane->fb != fb ||
+ dplane->base.state.src_x != src.x1 >> 16 ||
+ dplane->base.state.src_y != src.y1 >> 16;
+
if (!dcrtc->plane) {
dcrtc->plane = plane;
armada_ovl_update_attr(&dplane->prop, dcrtc);
@@ -145,7 +158,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
/* FIXME: overlay on an interlaced display */
/* Just updating the position/size? */
- if (plane->fb == fb && dplane->base.state.ctrl0 == ctrl0) {
+ if (!fb_changed && dplane->base.state.ctrl0 == ctrl0) {
val = (drm_rect_height(&src) & 0xffff0000) |
drm_rect_width(&src) >> 16;
dplane->base.state.src_hw = val;
@@ -169,9 +182,8 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
armada_drm_plane_work_cancel(dcrtc, &dplane->base);
- if (plane->fb != fb) {
- u32 addrs[3], pixel_format;
- int num_planes, hsub;
+ if (fb_changed) {
+ u32 addrs[3];
/*
* Take a reference on the new framebuffer - we want to
@@ -182,23 +194,11 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
if (plane->fb)
armada_ovl_retire_fb(dplane, plane->fb);
- src_y = src.y1 >> 16;
- src_x = src.x1 >> 16;
+ dplane->base.state.src_y = src_y = src.y1 >> 16;
+ dplane->base.state.src_x = src_x = src.x1 >> 16;
armada_drm_plane_calc_addrs(addrs, fb, src_x, src_y);
- pixel_format = fb->format->format;
- hsub = drm_format_horz_chroma_subsampling(pixel_format);
- num_planes = fb->format->num_planes;
-
- /*
- * Annoyingly, shifting a YUYV-format image by one pixel
- * causes the U/V planes to toggle. Toggle the UV swap.
- * (Unfortunately, this causes momentary colour flickering.)
- */
- if (src_x & (hsub - 1) && num_planes == 1)
- ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
-
armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
LCD_SPU_DMA_START_ADDR_Y0);
armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
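Two things change together in the overlay path: the chroma-subsampling factors now come from fb->format instead of a per-update drm_format_horz_chroma_subsampling() lookup, and src_x/src_y are recorded in the plane state so that a pan within the same framebuffer is recognised as a real change. Restating the new test with the rationale spelled out:

/* A same-fb pan changes the DMA start addresses, so it must take the
 * full reprogramming path, not the position/size-only shortcut. */
fb_changed = plane->fb != fb ||
	     dplane->base.state.src_x != src.x1 >> 16 ||
	     dplane->base.state.src_y != src.y1 >> 16;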
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 4820141..9ae2360 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -152,14 +152,23 @@ static void drm_connector_free(struct kref *kref)
connector->funcs->destroy(connector);
}
-static void drm_connector_free_work_fn(struct work_struct *work)
+void drm_connector_free_work_fn(struct work_struct *work)
{
- struct drm_connector *connector =
- container_of(work, struct drm_connector, free_work);
- struct drm_device *dev = connector->dev;
+ struct drm_connector *connector, *n;
+ struct drm_device *dev =
+ container_of(work, struct drm_device, mode_config.connector_free_work);
+ struct drm_mode_config *config = &dev->mode_config;
+ unsigned long flags;
+ struct llist_node *freed;
- drm_mode_object_unregister(dev, &connector->base);
- connector->funcs->destroy(connector);
+ spin_lock_irqsave(&config->connector_list_lock, flags);
+ freed = llist_del_all(&config->connector_free_list);
+ spin_unlock_irqrestore(&config->connector_list_lock, flags);
+
+ llist_for_each_entry_safe(connector, n, freed, free_node) {
+ drm_mode_object_unregister(dev, &connector->base);
+ connector->funcs->destroy(connector);
+ }
}
/**
@@ -191,8 +200,6 @@ int drm_connector_init(struct drm_device *dev,
if (ret)
return ret;
- INIT_WORK(&connector->free_work, drm_connector_free_work_fn);
-
connector->base.properties = &connector->properties;
connector->dev = dev;
connector->funcs = funcs;
@@ -547,10 +554,17 @@ EXPORT_SYMBOL(drm_connector_list_iter_begin);
* actually release the connector when dropping our final reference.
*/
static void
-drm_connector_put_safe(struct drm_connector *conn)
+__drm_connector_put_safe(struct drm_connector *conn)
{
- if (refcount_dec_and_test(&conn->base.refcount.refcount))
- schedule_work(&conn->free_work);
+ struct drm_mode_config *config = &conn->dev->mode_config;
+
+ lockdep_assert_held(&config->connector_list_lock);
+
+ if (!refcount_dec_and_test(&conn->base.refcount.refcount))
+ return;
+
+ llist_add(&conn->free_node, &config->connector_free_list);
+ schedule_work(&config->connector_free_work);
}
/**
@@ -582,10 +596,10 @@ drm_connector_list_iter_next(struct drm_connector_list_iter *iter)
/* loop until it's not a zombie connector */
} while (!kref_get_unless_zero(&iter->conn->base.refcount));
- spin_unlock_irqrestore(&config->connector_list_lock, flags);
if (old_conn)
- drm_connector_put_safe(old_conn);
+ __drm_connector_put_safe(old_conn);
+ spin_unlock_irqrestore(&config->connector_list_lock, flags);
return iter->conn;
}
@@ -602,9 +616,15 @@ EXPORT_SYMBOL(drm_connector_list_iter_next);
*/
void drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
{
+ struct drm_mode_config *config = &iter->dev->mode_config;
+ unsigned long flags;
+
iter->dev = NULL;
- if (iter->conn)
- drm_connector_put_safe(iter->conn);
+ if (iter->conn) {
+ spin_lock_irqsave(&config->connector_list_lock, flags);
+ __drm_connector_put_safe(iter->conn);
+ spin_unlock_irqrestore(&config->connector_list_lock, flags);
+ }
lock_release(&connector_list_iter_dep_map, 0, _RET_IP_);
}
EXPORT_SYMBOL(drm_connector_list_iter_end);
@@ -1231,6 +1251,19 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
if (edid)
size = EDID_LENGTH * (1 + edid->extensions);
+ /* Set the display info, using edid if available, otherwise
+ * resetting the values to defaults. This duplicates the work
+ * done in drm_add_edid_modes, but that function is not
+ * consistently called before this one in all drivers and the
+ * computation is cheap enough that it seems better to
+ * duplicate it rather than attempt to ensure some arbitrary
+ * ordering of calls.
+ */
+ if (edid)
+ drm_add_display_info(connector, edid);
+ else
+ drm_reset_display_info(connector);
+
drm_object_property_set_value(&connector->base,
dev->mode_config.non_desktop_property,
connector->display_info.non_desktop);
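The connector teardown moves from one work item embedded in every connector to a single per-device work that drains a lock-free llist: dropping the last reference under connector_list_lock only queues the node, and the work steals the whole list in one atomic operation. A simplified sketch of the drain side (the locking against the list iterator is omitted):

static void connector_free_work_fn(struct work_struct *work)
{
	struct drm_device *dev =
		container_of(work, struct drm_device,
			     mode_config.connector_free_work);
	struct drm_connector *conn, *n;
	struct llist_node *freed;

	/* Atomically detach everything queued so far. */
	freed = llist_del_all(&dev->mode_config.connector_free_list);

	llist_for_each_entry_safe(conn, n, freed, free_node) {
		drm_mode_object_unregister(dev, &conn->base);
		conn->funcs->destroy(conn);
	}
}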
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 9ebb884..af00f42 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -142,6 +142,7 @@ int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
uint64_t value);
int drm_connector_create_standard_properties(struct drm_device *dev);
const char *drm_get_connector_force_name(enum drm_connector_force force);
+void drm_connector_free_work_fn(struct work_struct *work);
/* IOCTL */
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5dfe147..cb48714 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1731,7 +1731,7 @@ EXPORT_SYMBOL(drm_edid_duplicate);
*
* Returns true if @vendor is in @edid, false otherwise
*/
-static bool edid_vendor(struct edid *edid, const char *vendor)
+static bool edid_vendor(const struct edid *edid, const char *vendor)
{
char edid_vendor[3];
@@ -1749,7 +1749,7 @@ static bool edid_vendor(struct edid *edid, const char *vendor)
*
* This tells subsequent routines what fixes they need to apply.
*/
-static u32 edid_get_quirks(struct edid *edid)
+static u32 edid_get_quirks(const struct edid *edid)
{
const struct edid_quirk *quirk;
int i;
@@ -2813,7 +2813,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
/*
* Search EDID for CEA extension block.
*/
-static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
+static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id)
{
u8 *edid_ext = NULL;
int i;
@@ -2835,12 +2835,12 @@ static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
return edid_ext;
}
-static u8 *drm_find_cea_extension(struct edid *edid)
+static u8 *drm_find_cea_extension(const struct edid *edid)
{
return drm_find_edid_extension(edid, CEA_EXT);
}
-static u8 *drm_find_displayid_extension(struct edid *edid)
+static u8 *drm_find_displayid_extension(const struct edid *edid)
{
return drm_find_edid_extension(edid, DISPLAYID_EXT);
}
@@ -4363,7 +4363,7 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
}
static void drm_parse_cea_ext(struct drm_connector *connector,
- struct edid *edid)
+ const struct edid *edid)
{
struct drm_display_info *info = &connector->display_info;
const u8 *edid_ext;
@@ -4397,11 +4397,33 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
}
}
-static void drm_add_display_info(struct drm_connector *connector,
- struct edid *edid, u32 quirks)
+/* The connector has no EDID, so there is nothing to compute quirks from. Reset
+ * all of the values which would otherwise have been set from EDID.
+ */
+void
+drm_reset_display_info(struct drm_connector *connector)
{
struct drm_display_info *info = &connector->display_info;
+ info->width_mm = 0;
+ info->height_mm = 0;
+
+ info->bpc = 0;
+ info->color_formats = 0;
+ info->cea_rev = 0;
+ info->max_tmds_clock = 0;
+ info->dvi_dual = false;
+
+ info->non_desktop = 0;
+}
+EXPORT_SYMBOL_GPL(drm_reset_display_info);
+
+u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
+{
+ struct drm_display_info *info = &connector->display_info;
+
+ u32 quirks = edid_get_quirks(edid);
+
info->width_mm = edid->width_cm * 10;
info->height_mm = edid->height_cm * 10;
@@ -4414,11 +4436,13 @@ static void drm_add_display_info(struct drm_connector *connector,
info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
+ DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
+
if (edid->revision < 3)
- return;
+ return quirks;
if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
- return;
+ return quirks;
drm_parse_cea_ext(connector, edid);
@@ -4438,7 +4462,7 @@ static void drm_add_display_info(struct drm_connector *connector,
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
- return;
+ return quirks;
switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
case DRM_EDID_DIGITAL_DEPTH_6:
@@ -4473,7 +4497,9 @@ static void drm_add_display_info(struct drm_connector *connector,
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+ return quirks;
}
+EXPORT_SYMBOL_GPL(drm_add_display_info);
static int validate_displayid(u8 *displayid, int length, int idx)
{
@@ -4627,14 +4653,12 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
return 0;
}
- quirks = edid_get_quirks(edid);
-
/*
* CEA-861-F adds ycbcr capability map block, for HDMI 2.0 sinks.
* To avoid multiple parsing of same block, lets parse that map
* from sink info, before parsing CEA modes.
*/
- drm_add_display_info(connector, edid, quirks);
+ quirks = drm_add_display_info(connector, edid);
/*
* EDID spec says modes should be preferred in this order:
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index d1eb56a..1402c0e 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -220,17 +220,6 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
mutex_lock(&dev->mode_config.idr_mutex);
- /* Insert the new lessee into the tree */
- id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL);
- if (id < 0) {
- error = id;
- goto out_lessee;
- }
-
- lessee->lessee_id = id;
- lessee->lessor = drm_master_get(lessor);
- list_add_tail(&lessee->lessee_list, &lessor->lessees);
-
idr_for_each_entry(leases, entry, object) {
error = 0;
if (!idr_find(&dev->mode_config.crtc_idr, object))
@@ -246,6 +235,17 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
}
}
+ /* Insert the new lessee into the tree */
+ id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL);
+ if (id < 0) {
+ error = id;
+ goto out_lessee;
+ }
+
+ lessee->lessee_id = id;
+ lessee->lessor = drm_master_get(lessor);
+ list_add_tail(&lessee->lessee_list, &lessor->lessees);
+
/* Move the leases over */
lessee->leases = *leases;
DRM_DEBUG_LEASE("new lessee %d %p, lessor %d %p\n", lessee->lessee_id, lessee, lessor->lessee_id, lessor);
@@ -254,10 +254,10 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
return lessee;
out_lessee:
- drm_master_put(&lessee);
-
mutex_unlock(&dev->mode_config.idr_mutex);
+ drm_master_put(&lessee);
+
return ERR_PTR(error);
}
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 61a1c8e..c3c79ee 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -575,21 +575,23 @@ EXPORT_SYMBOL(drm_mm_remove_node);
*/
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
+ struct drm_mm *mm = old->mm;
+
DRM_MM_BUG_ON(!old->allocated);
*new = *old;
list_replace(&old->node_list, &new->node_list);
- rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree.rb_root);
+ rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
if (drm_mm_hole_follows(old)) {
list_replace(&old->hole_stack, &new->hole_stack);
rb_replace_node(&old->rb_hole_size,
&new->rb_hole_size,
- &old->mm->holes_size);
+ &mm->holes_size);
rb_replace_node(&old->rb_hole_addr,
&new->rb_hole_addr,
- &old->mm->holes_addr);
+ &mm->holes_addr);
}
old->allocated = false;
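drm_mm's interval tree is an rb_root_cached, which caches a pointer to the leftmost node so first-entry lookups are O(1); replacing a node with plain rb_replace_node() would leave that cached pointer dangling whenever the victim happened to be leftmost, which is what rb_replace_node_cached() exists to fix. A sketch with a hypothetical node type:

struct range_node {
	struct rb_node rb;
	u64 start;	/* sort key */
};

/* Swap "new" in for "old" without rebalancing; the key is copied first
 * so the tree ordering is preserved, and the cached leftmost pointer
 * is fixed up if "old" held it. */
static void replace_range(struct range_node *old, struct range_node *new,
			  struct rb_root_cached *root)
{
	new->start = old->start;
	rb_replace_node_cached(&old->rb, &new->rb, root);
}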
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index cc78b3d..256de73 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -382,6 +382,9 @@ void drm_mode_config_init(struct drm_device *dev)
ida_init(&dev->mode_config.connector_ida);
spin_lock_init(&dev->mode_config.connector_list_lock);
+ init_llist_head(&dev->mode_config.connector_free_list);
+ INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn);
+
drm_mode_create_standard_properties(dev);
/* Just to be sure */
@@ -432,7 +435,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
}
drm_connector_list_iter_end(&conn_iter);
/* connector_iter drops references in a work item. */
- flush_scheduled_work();
+ flush_work(&dev->mode_config.connector_free_work);
if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) {
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 37a93cd..2c90519 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -558,11 +558,10 @@ int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
}
/*
- * setplane_internal - setplane handler for internal callers
+ * __setplane_internal - setplane handler for internal callers
*
- * Note that we assume an extra reference has already been taken on fb. If the
- * update fails, this reference will be dropped before return; if it succeeds,
- * the previous framebuffer (if any) will be unreferenced instead.
+ * This function will take a reference on the new fb for the plane
+ * on success.
*
* src_{x,y,w,h} are provided in 16.16 fixed point format
*/
@@ -630,14 +629,12 @@ static int __setplane_internal(struct drm_plane *plane,
if (!ret) {
plane->crtc = crtc;
plane->fb = fb;
- fb = NULL;
+ drm_framebuffer_get(plane->fb);
} else {
plane->old_fb = NULL;
}
out:
- if (fb)
- drm_framebuffer_put(fb);
if (plane->old_fb)
drm_framebuffer_put(plane->old_fb);
plane->old_fb = NULL;
@@ -685,6 +682,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
struct drm_plane *plane;
struct drm_crtc *crtc = NULL;
struct drm_framebuffer *fb = NULL;
+ int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -717,15 +715,16 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
}
}
- /*
- * setplane_internal will take care of deref'ing either the old or new
- * framebuffer depending on success.
- */
- return setplane_internal(plane, crtc, fb,
- plane_req->crtc_x, plane_req->crtc_y,
- plane_req->crtc_w, plane_req->crtc_h,
- plane_req->src_x, plane_req->src_y,
- plane_req->src_w, plane_req->src_h);
+ ret = setplane_internal(plane, crtc, fb,
+ plane_req->crtc_x, plane_req->crtc_y,
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->src_x, plane_req->src_y,
+ plane_req->src_w, plane_req->src_h);
+
+ if (fb)
+ drm_framebuffer_put(fb);
+
+ return ret;
}
static int drm_mode_cursor_universal(struct drm_crtc *crtc,
@@ -788,13 +787,12 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
src_h = fb->height << 16;
}
- /*
- * setplane_internal will take care of deref'ing either the old or new
- * framebuffer depending on success.
- */
ret = __setplane_internal(crtc->cursor, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- 0, 0, src_w, src_h, ctx);
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ 0, 0, src_w, src_h, ctx);
+
+ if (fb)
+ drm_framebuffer_put(fb);
/* Update successful; save new cursor position, if necessary */
if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index f776fc1..cb4d09c 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -369,40 +369,26 @@ static const struct file_operations drm_syncobj_file_fops = {
.release = drm_syncobj_file_release,
};
-static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
-{
- struct file *file = anon_inode_getfile("syncobj_file",
- &drm_syncobj_file_fops,
- syncobj, 0);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- drm_syncobj_get(syncobj);
- if (cmpxchg(&syncobj->file, NULL, file)) {
- /* lost the race */
- fput(file);
- }
-
- return 0;
-}
-
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
- int ret;
+ struct file *file;
int fd;
fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0)
return fd;
- if (!syncobj->file) {
- ret = drm_syncobj_alloc_file(syncobj);
- if (ret) {
- put_unused_fd(fd);
- return ret;
- }
+ file = anon_inode_getfile("syncobj_file",
+ &drm_syncobj_file_fops,
+ syncobj, 0);
+ if (IS_ERR(file)) {
+ put_unused_fd(fd);
+ return PTR_ERR(file);
}
- fd_install(fd, syncobj->file);
+
+ drm_syncobj_get(syncobj);
+ fd_install(fd, file);
+
*p_fd = fd;
return 0;
}
@@ -422,31 +408,24 @@ static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
return ret;
}
-static struct drm_syncobj *drm_syncobj_fdget(int fd)
-{
- struct file *file = fget(fd);
-
- if (!file)
- return NULL;
- if (file->f_op != &drm_syncobj_file_fops)
- goto err;
-
- return file->private_data;
-err:
- fput(file);
- return NULL;
-};
-
static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
int fd, u32 *handle)
{
- struct drm_syncobj *syncobj = drm_syncobj_fdget(fd);
+ struct drm_syncobj *syncobj;
+ struct file *file;
int ret;
- if (!syncobj)
+ file = fget(fd);
+ if (!file)
return -EINVAL;
+ if (file->f_op != &drm_syncobj_file_fops) {
+ fput(file);
+ return -EINVAL;
+ }
+
/* take a reference to put in the idr */
+ syncobj = file->private_data;
drm_syncobj_get(syncobj);
idr_preload(GFP_KERNEL);
@@ -455,12 +434,14 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
spin_unlock(&file_private->syncobj_table_lock);
idr_preload_end();
- if (ret < 0) {
- fput(syncobj->file);
- return ret;
- }
- *handle = ret;
- return 0;
+ if (ret > 0) {
+ *handle = ret;
+ ret = 0;
+ } else
+ drm_syncobj_put(syncobj);
+
+ fput(file);
+ return ret;
}
static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
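The rewrite drops the file cached on the syncobj and the cmpxchg() race that guarded it: every handle-to-fd export now builds a fresh anon-inode file and publishes the fd with fd_install() only once nothing can fail. That ordering is the standard pattern for exporting a refcounted object as a file descriptor; a sketch with hypothetical object type and get helper:

static int export_as_fd(struct my_obj *obj,
			const struct file_operations *fops, int *p_fd)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("my_obj_file", fops, obj, 0);
	if (IS_ERR(file)) {
		put_unused_fd(fd);	/* fd never became visible */
		return PTR_ERR(file);
	}

	my_obj_get(obj);	/* reference owned by file, dropped in ->release */
	fd_install(fd, file);	/* point of no return: fd is now live */
	*p_fd = fd;
	return 0;
}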
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 85d4c57..49af946 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2777,12 +2777,12 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
}
static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
- unsigned int opcode, int rings)
+ unsigned int opcode, unsigned long rings)
{
struct cmd_info *info = NULL;
unsigned int ring;
- for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) {
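+ /* use a real unsigned long so for_each_set_bit() cannot read past an int */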
+ for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
info = find_cmd_entry(gvt, opcode, ring);
if (info)
break;
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 3551208..309f3fa 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -266,6 +266,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
/* Clear host CRT status, so the guest can't detect the host CRT. */
if (IS_BROADWELL(dev_priv))
vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
+
+ vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
}
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -282,7 +284,6 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
int type, unsigned int resolution)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
if (WARN_ON(resolution >= GVT_EDID_NUM))
@@ -308,7 +309,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
port->type = type;
emulate_monitor_status_change(vgpu);
- vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 8e33114..64d67ff 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1359,12 +1359,15 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
return ret;
} else {
if (!test_bit(index, spt->post_shadow_bitmap)) {
+ int type = spt->shadow_page.type;
+
ppgtt_get_shadow_entry(spt, &se, index);
ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
if (ret)
return ret;
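+ /* point the now-removed shadow entry at the scratch page */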
+ ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &se, index);
}
-
ppgtt_set_post_shadow(spt, index);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 54b5d4c..e143004 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2368,6 +2368,9 @@ struct drm_i915_private {
*/
struct workqueue_struct *wq;
+ /* ordered wq for modesets */
+ struct workqueue_struct *modeset_wq;
+
/* Display functions */
struct drm_i915_display_funcs display;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ad4050f..5cfba89 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -330,17 +330,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
* must wait for all rendering to complete to the object (as unbinding
* must anyway), and retire the requests.
*/
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED |
- I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ ret = i915_gem_object_set_to_cpu_domain(obj, false);
if (ret)
return ret;
- i915_gem_retire_requests(to_i915(obj->base.dev));
-
while ((vma = list_first_entry_or_null(&obj->vma_list,
struct i915_vma,
obj_link))) {
@@ -474,7 +467,7 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
struct drm_i915_gem_request *rq;
struct intel_engine_cs *engine;
- if (!dma_fence_is_i915(fence))
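+ /* no point boosting the priority of an already-signalled fence */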
+ if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
return;
rq = to_request(fence);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3866c49..7923dfd 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6977,6 +6977,7 @@ enum {
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
+#define SKL_SELECT_ALTERNATE_DC_EXIT (1<<30)
#define MASK_WAKEMEM (1<<13)
#define SKL_DFSM _MMIO(0x51000)
@@ -7026,6 +7027,8 @@ enum {
#define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308)
#define DISABLE_PIXEL_MASK_CAMMING (1<<14)
+#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
+
#define GEN7_L3SQCREG1 _MMIO(0xB010)
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
@@ -8522,6 +8525,7 @@ enum skl_power_gate {
#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20)
+#define CDCLK_DIVMUX_CD_OVERRIDE (1<<19)
#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index e8ca67a..ac236b8 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -367,6 +367,7 @@ struct i915_sw_dma_fence_cb {
struct dma_fence *dma;
struct timer_list timer;
struct irq_work work;
+ struct rcu_head rcu;
};
static void timer_i915_sw_fence_wake(struct timer_list *t)
@@ -406,7 +407,7 @@ static void irq_i915_sw_fence_work(struct irq_work *wrk)
del_timer_sync(&cb->timer);
dma_fence_put(cb->dma);
- kfree(cb);
+ kfree_rcu(cb, rcu);
}
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 5f8b9f1..bcbc7ab 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -186,7 +186,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
struct intel_wait *wait, *n, *first;
if (!b->irq_armed)
- return;
+ goto wakeup_signaler;
/* We only disarm the irq when we are idle (all requests completed),
* so if the bottom-half remains asleep, it missed the request
@@ -208,6 +208,14 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
b->waiters = RB_ROOT;
spin_unlock_irq(&b->rb_lock);
+
+ /*
+ * The signaling thread may be asleep holding a reference to a request
+ * that had its signaling cancelled prior to being preempted. We need
+ * to kick the signaler, just in case, to release any such reference.
+ */
+wakeup_signaler:
+ wake_up_process(b->signaler);
}
static bool use_fake_irq(const struct intel_breadcrumbs *b)
@@ -651,23 +659,15 @@ static int intel_breadcrumbs_signaler(void *arg)
}
if (unlikely(do_schedule)) {
- DEFINE_WAIT(exec);
-
if (kthread_should_park())
kthread_parkme();
- if (kthread_should_stop()) {
- GEM_BUG_ON(request);
+ if (unlikely(kthread_should_stop())) {
+ i915_gem_request_put(request);
break;
}
- if (request)
- add_wait_queue(&request->execute, &exec);
-
schedule();
-
- if (request)
- remove_wait_queue(&request->execute, &exec);
}
i915_gem_request_put(request);
} while (1);
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index b2a6d62..60cf4e5 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -860,16 +860,10 @@ static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
- int min_cdclk = skl_calc_cdclk(0, vco);
u32 val;
WARN_ON(vco != 8100000 && vco != 8640000);
- /* select the minimum CDCLK before enabling DPLL 0 */
- val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
- I915_WRITE(CDCLK_CTL, val);
- POSTING_READ(CDCLK_CTL);
-
/*
* We always enable DPLL0 with the lowest link rate possible, but still
* taking into account the VCO required to operate the eDP panel at the
@@ -923,7 +917,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
- u32 freq_select, pcu_ack;
+ u32 freq_select, pcu_ack, cdclk_ctl;
int ret;
WARN_ON((cdclk == 24000) != (vco == 0));
@@ -940,7 +934,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
return;
}
- /* set CDCLK_CTL */
+ /* Choose frequency for this cdclk */
switch (cdclk) {
case 450000:
case 432000:
@@ -968,10 +962,33 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
dev_priv->cdclk.hw.vco != vco)
skl_dpll0_disable(dev_priv);
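+ /* Wa Display #1183: CDCLK_CTL is updated read-modify-write around the DPLL0 transition */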
+ cdclk_ctl = I915_READ(CDCLK_CTL);
+
+ if (dev_priv->cdclk.hw.vco != vco) {
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
+ cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
+ }
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
+ POSTING_READ(CDCLK_CTL);
+
if (dev_priv->cdclk.hw.vco != vco)
skl_dpll0_enable(dev_priv, vco);
- I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
+
+ cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
+ I915_WRITE(CDCLK_CTL, cdclk_ctl);
POSTING_READ(CDCLK_CTL);
/* inform PCU of the change */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index e0843bb..58a3755 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2128,6 +2128,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
if (WARN_ON(!pll))
return;
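+ /* serialize against concurrent updates of the shared DPLL mapping registers */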
+ mutex_lock(&dev_priv->dpll_lock);
+
if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
val = I915_READ(DPCLKA_CFGCR0);
@@ -2157,6 +2159,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
} else if (INTEL_INFO(dev_priv)->gen < 9) {
I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
}
+
+ mutex_unlock(&dev_priv->dpll_lock);
}
static void intel_ddi_clk_disable(struct intel_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e8ccf89..50f8443 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1211,23 +1211,6 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
pipe_name(pipe));
}
-static void assert_cursor(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
-{
- bool cur_state;
-
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
- cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
- else
- cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
-
- I915_STATE_WARN(cur_state != state,
- "cursor on pipe %c assertion failure (expected %s, current %s)\n",
- pipe_name(pipe), onoff(state), onoff(cur_state));
-}
-#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
-#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
-
void assert_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
@@ -1255,77 +1238,25 @@ void assert_pipe(struct drm_i915_private *dev_priv,
pipe_name(pipe), onoff(state), onoff(cur_state));
}
-static void assert_plane(struct drm_i915_private *dev_priv,
- enum plane plane, bool state)
+static void assert_plane(struct intel_plane *plane, bool state)
{
- u32 val;
- bool cur_state;
+ bool cur_state = plane->get_hw_state(plane);
- val = I915_READ(DSPCNTR(plane));
- cur_state = !!(val & DISPLAY_PLANE_ENABLE);
I915_STATE_WARN(cur_state != state,
- "plane %c assertion failure (expected %s, current %s)\n",
- plane_name(plane), onoff(state), onoff(cur_state));
+ "%s assertion failure (expected %s, current %s)\n",
+ plane->base.name, onoff(state), onoff(cur_state));
}
-#define assert_plane_enabled(d, p) assert_plane(d, p, true)
-#define assert_plane_disabled(d, p) assert_plane(d, p, false)
+#define assert_plane_enabled(p) assert_plane(p, true)
+#define assert_plane_disabled(p) assert_plane(p, false)
-static void assert_planes_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void assert_planes_disabled(struct intel_crtc *crtc)
{
- int i;
-
- /* Primary planes are fixed to pipes on gen4+ */
- if (INTEL_GEN(dev_priv) >= 4) {
- u32 val = I915_READ(DSPCNTR(pipe));
- I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
- "plane %c assertion failure, should be disabled but not\n",
- plane_name(pipe));
- return;
- }
-
- /* Need to check both planes against the pipe */
- for_each_pipe(dev_priv, i) {
- u32 val = I915_READ(DSPCNTR(i));
- enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
- DISPPLANE_SEL_PIPE_SHIFT;
- I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
- "plane %c assertion failure, should be off on pipe %c but is still active\n",
- plane_name(i), pipe_name(pipe));
- }
-}
-
-static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- int sprite;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
- if (INTEL_GEN(dev_priv) >= 9) {
- for_each_sprite(dev_priv, pipe, sprite) {
- u32 val = I915_READ(PLANE_CTL(pipe, sprite));
- I915_STATE_WARN(val & PLANE_CTL_ENABLE,
- "plane %d assertion failure, should be off on pipe %c but is still active\n",
- sprite, pipe_name(pipe));
- }
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- for_each_sprite(dev_priv, pipe, sprite) {
- u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite));
- I915_STATE_WARN(val & SP_ENABLE,
- "sprite %c assertion failure, should be off on pipe %c but is still active\n",
- sprite_name(pipe, sprite), pipe_name(pipe));
- }
- } else if (INTEL_GEN(dev_priv) >= 7) {
- u32 val = I915_READ(SPRCTL(pipe));
- I915_STATE_WARN(val & SPRITE_ENABLE,
- "sprite %c assertion failure, should be off on pipe %c but is still active\n",
- plane_name(pipe), pipe_name(pipe));
- } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
- u32 val = I915_READ(DVSCNTR(pipe));
- I915_STATE_WARN(val & DVS_ENABLE,
- "sprite %c assertion failure, should be off on pipe %c but is still active\n",
- plane_name(pipe), pipe_name(pipe));
- }
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
+ assert_plane_disabled(plane);
}
static void assert_vblank_disabled(struct drm_crtc *crtc)
@@ -1918,9 +1849,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
- assert_planes_disabled(dev_priv, pipe);
- assert_cursor_disabled(dev_priv, pipe);
- assert_sprites_disabled(dev_priv, pipe);
+ assert_planes_disabled(crtc);
/*
* A pipe without a PLL won't actually be able to drive bits from
@@ -1989,9 +1918,7 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
* Make sure planes won't keep trying to pump pixels to us,
* or we might hang the display.
*/
- assert_planes_disabled(dev_priv, pipe);
- assert_cursor_disabled(dev_priv, pipe);
- assert_sprites_disabled(dev_priv, pipe);
+ assert_planes_disabled(crtc);
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
@@ -2820,6 +2747,23 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
crtc_state->active_planes);
}
+static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
+ struct intel_plane *plane)
+{
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ intel_set_plane_visible(crtc_state, plane_state, false);
+
+ if (plane->id == PLANE_PRIMARY)
+ intel_pre_disable_primary_noatomic(&crtc->base);
+
+ trace_intel_disable_plane(&plane->base, crtc);
+ plane->disable_plane(plane, crtc);
+}
+
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_initial_plane_config *plane_config)
@@ -2877,12 +2821,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
* simplest solution is to just disable the primary plane now and
* pretend the BIOS never had it enabled.
*/
- intel_set_plane_visible(to_intel_crtc_state(crtc_state),
- to_intel_plane_state(plane_state),
- false);
- intel_pre_disable_primary_noatomic(&intel_crtc->base);
- trace_intel_disable_plane(primary, intel_crtc);
- intel_plane->disable_plane(intel_plane, intel_crtc);
+ intel_plane_disable_noatomic(intel_crtc, intel_plane);
return;
@@ -3385,6 +3324,31 @@ static void i9xx_disable_primary_plane(struct intel_plane *primary,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+static bool i9xx_plane_get_hw_state(struct intel_plane *primary)
+{
+ struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum plane plane = primary->plane;
+ enum pipe pipe = primary->pipe;
+ bool ret;
+
+ /*
+ * Not 100% correct for planes that can move between pipes,
+ * but that's only the case for gen2-4, which don't have any
+ * display power wells.
+ */
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
{
@@ -4866,7 +4830,8 @@ void hsw_enable_ips(struct intel_crtc *crtc)
* a vblank wait.
*/
- assert_plane_enabled(dev_priv, crtc->plane);
+ assert_plane_enabled(to_intel_plane(crtc->base.primary));
+
if (IS_BROADWELL(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
@@ -4899,7 +4864,8 @@ void hsw_disable_ips(struct intel_crtc *crtc)
if (!crtc->config->ips_enabled)
return;
- assert_plane_enabled(dev_priv, crtc->plane);
+ assert_plane_enabled(to_intel_plane(crtc->base.primary));
+
if (IS_BROADWELL(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
@@ -5899,6 +5865,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum intel_display_power_domain domain;
+ struct intel_plane *plane;
u64 domains;
struct drm_atomic_state *state;
struct intel_crtc_state *crtc_state;
@@ -5907,11 +5874,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
if (!intel_crtc->active)
return;
- if (crtc->primary->state->visible) {
- intel_pre_disable_primary_noatomic(crtc);
+ for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
- intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
- crtc->primary->state->visible = false;
+ if (plane_state->base.visible)
+ intel_plane_disable_noatomic(intel_crtc, plane);
}
state = drm_atomic_state_alloc(crtc->dev);
@@ -9477,6 +9445,23 @@ static void i845_disable_cursor(struct intel_plane *plane,
i845_update_cursor(plane, NULL, NULL);
}
+static bool i845_cursor_get_hw_state(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(PIPE_A);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -9670,6 +9655,28 @@ static void i9xx_disable_cursor(struct intel_plane *plane,
i9xx_update_cursor(plane, NULL, NULL);
}
+static bool i9xx_cursor_get_hw_state(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = plane->pipe;
+ bool ret;
+
+ /*
+ * Not 100% correct for planes that can move between pipes,
+ * but that's only the case for gen2-3, which don't have any
+ * display power wells.
+ */
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
/* VESA 640x480x72Hz mode to set on the pipe */
static const struct drm_display_mode load_detect_mode = {
@@ -9944,11 +9951,10 @@ found:
}
ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
+ drm_framebuffer_put(fb);
if (ret)
goto fail;
- drm_framebuffer_put(fb);
-
ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
if (ret)
goto fail;
@@ -12545,11 +12551,15 @@ static int intel_atomic_commit(struct drm_device *dev,
INIT_WORK(&state->commit_work, intel_atomic_commit_work);
i915_sw_fence_commit(&intel_state->commit_ready);
- if (nonblock)
+ if (nonblock && intel_state->modeset) {
+ queue_work(dev_priv->modeset_wq, &state->commit_work);
+ } else if (nonblock) {
queue_work(system_unbound_wq, &state->commit_work);
- else
+ } else {
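+ /* wait for any pending nonblocking modesets to finish first */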
+ if (intel_state->modeset)
+ flush_workqueue(dev_priv->modeset_wq);
intel_atomic_commit_tail(state);
-
+ }
return 0;
}
@@ -13195,13 +13205,14 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 10) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
modifiers = skl_format_modifiers_ccs;
primary->update_plane = skl_update_plane;
primary->disable_plane = skl_disable_plane;
+ primary->get_hw_state = skl_plane_get_hw_state;
} else if (INTEL_GEN(dev_priv) >= 9) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
@@ -13212,6 +13223,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = skl_update_plane;
primary->disable_plane = skl_disable_plane;
+ primary->get_hw_state = skl_plane_get_hw_state;
} else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -13219,6 +13231,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = i9xx_update_primary_plane;
primary->disable_plane = i9xx_disable_primary_plane;
+ primary->get_hw_state = i9xx_plane_get_hw_state;
} else {
intel_primary_formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
@@ -13226,6 +13239,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = i9xx_update_primary_plane;
primary->disable_plane = i9xx_disable_primary_plane;
+ primary->get_hw_state = i9xx_plane_get_hw_state;
}
if (INTEL_GEN(dev_priv) >= 9)
@@ -13315,10 +13329,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
cursor->update_plane = i845_update_cursor;
cursor->disable_plane = i845_disable_cursor;
+ cursor->get_hw_state = i845_cursor_get_hw_state;
cursor->check_plane = i845_check_cursor;
} else {
cursor->update_plane = i9xx_update_cursor;
cursor->disable_plane = i9xx_disable_cursor;
+ cursor->get_hw_state = i9xx_cursor_get_hw_state;
cursor->check_plane = i9xx_check_cursor;
}
@@ -14463,6 +14479,8 @@ int intel_modeset_init(struct drm_device *dev)
enum pipe pipe;
struct intel_crtc *crtc;
+ dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
+
drm_mode_config_init(dev);
dev->mode_config.min_width = 0;
@@ -14666,8 +14684,11 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
pipe_name(pipe));
- assert_plane_disabled(dev_priv, PLANE_A);
- assert_plane_disabled(dev_priv, PLANE_B);
+ WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
+ WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
+ WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
+ WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE);
+ WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE);
I915_WRITE(PIPECONF(pipe), 0);
POSTING_READ(PIPECONF(pipe));
@@ -14678,22 +14699,36 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
POSTING_READ(DPLL(pipe));
}
-static bool
-intel_check_plane_mapping(struct intel_crtc *crtc)
+static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
+ struct intel_plane *primary)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 val;
+ enum plane plane = primary->plane;
+ u32 val = I915_READ(DSPCNTR(plane));
- if (INTEL_INFO(dev_priv)->num_pipes == 1)
- return true;
+ return (val & DISPLAY_PLANE_ENABLE) == 0 ||
+ (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe);
+}
+
+static void
+intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
- val = I915_READ(DSPCNTR(!crtc->plane));
+ if (INTEL_GEN(dev_priv) >= 4)
+ return;
- if ((val & DISPLAY_PLANE_ENABLE) &&
- (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
- return false;
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
- return true;
+ if (intel_plane_mapping_ok(crtc, plane))
+ continue;
+
+ DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
+ plane->base.name);
+ intel_plane_disable_noatomic(crtc, plane);
+ }
}
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
@@ -14749,33 +14784,15 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
/* Disable everything but the primary plane */
for_each_intel_plane_on_crtc(dev, crtc, plane) {
- if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
- continue;
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
- trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(plane, crtc);
+ if (plane_state->base.visible &&
+ plane->base.type != DRM_PLANE_TYPE_PRIMARY)
+ intel_plane_disable_noatomic(crtc, plane);
}
}
- /* We need to sanitize the plane -> pipe mapping first because this will
- * disable the crtc (and hence change the state) if it is wrong. Note
- * that gen4+ has a fixed plane -> pipe mapping. */
- if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
- bool plane;
-
- DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
- crtc->base.base.id, crtc->base.name);
-
- /* Pipe has the wrong plane attached and the plane is active.
- * Temporarily change the plane mapping and disable everything
- * ... */
- plane = crtc->plane;
- crtc->base.primary->state->visible = true;
- crtc->plane = !plane;
- intel_crtc_disable_noatomic(&crtc->base, ctx);
- crtc->plane = plane;
- }
-
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
if (crtc->active && !intel_crtc_has_encoders(crtc))
@@ -14880,24 +14897,21 @@ void i915_redisable_vga(struct drm_i915_private *dev_priv)
intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
-static bool primary_get_hw_state(struct intel_plane *plane)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-
- return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
-}
-
/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct intel_crtc *crtc)
{
- struct intel_plane *primary = to_intel_plane(crtc->base.primary);
- bool visible;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane;
- visible = crtc->active && primary_get_hw_state(primary);
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ bool visible = plane->get_hw_state(plane);
- intel_set_plane_visible(to_intel_crtc_state(crtc->base.state),
- to_intel_plane_state(primary->base.state),
- visible);
+ intel_set_plane_visible(crtc_state, plane_state, visible);
+ }
}
static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -15095,6 +15109,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
/* HW state is read out, now we need to sanitize this mess. */
get_encoder_power_domains(dev_priv);
+ intel_sanitize_plane_mapping(dev_priv);
+
for_each_intel_encoder(dev, encoder) {
intel_sanitize_encoder(encoder);
}
@@ -15271,6 +15287,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_cleanup_gt_powersave(dev_priv);
intel_teardown_gmbus(dev_priv);
+
+ destroy_workqueue(dev_priv->modeset_wq);
}
void intel_connector_attach_encoder(struct intel_connector *connector,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 6c7f8bc..5d77f75 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -862,6 +862,7 @@ struct intel_plane {
const struct intel_plane_state *plane_state);
void (*disable_plane)(struct intel_plane *plane,
struct intel_crtc *crtc);
+ bool (*get_hw_state)(struct intel_plane *plane);
int (*check_plane)(struct intel_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state);
@@ -1924,6 +1925,7 @@ void skl_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
+bool skl_plane_get_hw_state(struct intel_plane *plane);
/* intel_tv.c */
void intel_tv_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index ab5bf4e..6074e04d 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1390,6 +1390,11 @@ static int glk_init_workarounds(struct intel_engine_cs *engine)
if (ret)
return ret;
+ /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
+ ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+ if (ret)
+ return ret;
+
/* WaToEnableHwFixForPushConstHWBug:glk */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 3bf6528..5809b29 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -193,7 +193,7 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
};
if (!pci_dev_present(atom_hdaudio_ids)) {
- DRM_INFO("%s\n", "HDaudio controller not detected, using LPE audio instead\n");
+ DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
lpe_present = true;
}
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d36e256..e71a8cd 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -974,6 +974,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
+ if (i915_gem_request_completed(request))
+ return;
+
if (prio <= READ_ONCE(request->priotree.priority))
return;
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 6e3b430..55ea5eb 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -590,7 +590,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = to_i915(dev);
if (dev_priv->psr.active) {
- i915_reg_t psr_ctl;
+ i915_reg_t psr_status;
u32 psr_status_mask;
if (dev_priv->psr.aux_frame_sync)
@@ -599,24 +599,24 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
0);
if (dev_priv->psr.psr2_support) {
- psr_ctl = EDP_PSR2_CTL;
+ psr_status = EDP_PSR2_STATUS_CTL;
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
- I915_WRITE(psr_ctl,
- I915_READ(psr_ctl) &
+ I915_WRITE(EDP_PSR2_CTL,
+ I915_READ(EDP_PSR2_CTL) &
~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
} else {
- psr_ctl = EDP_PSR_STATUS_CTL;
+ psr_status = EDP_PSR_STATUS_CTL;
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
- I915_WRITE(psr_ctl,
- I915_READ(psr_ctl) & ~EDP_PSR_ENABLE);
+ I915_WRITE(EDP_PSR_CTL,
+ I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
}
/* Wait till PSR is idle */
if (intel_wait_for_register(dev_priv,
- psr_ctl, psr_status_mask, 0,
+ psr_status, psr_status_mask, 0,
2000))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8af286c..7e115f3 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -598,6 +598,11 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Enabling DC5\n");
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (IS_GEN9_BC(dev_priv))
+ I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+ SKL_SELECT_ALTERNATE_DC_EXIT);
+
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
@@ -625,6 +630,11 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
DRM_DEBUG_KMS("Disabling DC6\n");
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (IS_GEN9_BC(dev_priv))
+ I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+ SKL_SELECT_ALTERNATE_DC_EXIT);
+
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
@@ -1786,6 +1796,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 4fcf80c..4a8a5d9 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -329,6 +329,26 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+bool
+skl_plane_get_hw_state(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(PLANE_CTL(pipe, plane_id)) & PLANE_CTL_ENABLE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
static void
chv_update_csc(struct intel_plane *plane, uint32_t format)
{
@@ -506,6 +526,26 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+static bool
+vlv_plane_get_hw_state(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(SPCNTR(pipe, plane_id)) & SP_ENABLE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -646,6 +686,25 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+static bool
+ivb_plane_get_hw_state(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = plane->pipe;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(SPRCTL(pipe)) & SPRITE_ENABLE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -777,6 +836,25 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+static bool
+g4x_plane_get_hw_state(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = plane->pipe;
+ bool ret;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+
+ ret = I915_READ(DVSCNTR(pipe)) & DVS_ENABLE;
+
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
static int
intel_check_sprite_plane(struct intel_plane *plane,
struct intel_crtc_state *crtc_state,
@@ -1232,6 +1310,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
+ intel_plane->get_hw_state = skl_plane_get_hw_state;
plane_formats = skl_plane_formats;
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
@@ -1242,6 +1321,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
+ intel_plane->get_hw_state = skl_plane_get_hw_state;
plane_formats = skl_plane_formats;
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
@@ -1252,6 +1332,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->update_plane = vlv_update_plane;
intel_plane->disable_plane = vlv_disable_plane;
+ intel_plane->get_hw_state = vlv_plane_get_hw_state;
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
@@ -1267,6 +1348,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
+ intel_plane->get_hw_state = ivb_plane_get_hw_state;
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
@@ -1277,6 +1359,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->update_plane = g4x_update_plane;
intel_plane->disable_plane = g4x_disable_plane;
+ intel_plane->get_hw_state = g4x_plane_get_hw_state;
modifiers = i9xx_plane_format_modifiers;
if (IS_GEN6(dev_priv)) {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 0760b93..baab933 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -121,6 +121,7 @@ int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int mcp77_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2615912..ef68741 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -224,7 +224,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
/* Determine if we can get a cache-coherent map, forcing
* uncached mapping if we can't.
*/
- if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
+ if (!nouveau_drm_use_coherent_gpu_mapping(drm))
nvbo->force_coherent = true;
}
@@ -262,7 +262,8 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
(flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
continue;
- if ((flags & TTM_PL_FLAG_TT ) && !vmm->page[i].host)
+ if ((flags & TTM_PL_FLAG_TT) &&
+ (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
/* Select this page size if it's the first that supports
@@ -1446,11 +1447,13 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
args.nv50.ro = 0;
args.nv50.kind = mem->kind;
args.nv50.comp = mem->comp;
+ argc = sizeof(args.nv50);
break;
case NVIF_CLASS_MEM_GF100:
args.gf100.version = 0;
args.gf100.ro = 0;
args.gf100.kind = mem->kind;
+ argc = sizeof(args.gf100);
break;
default:
WARN_ON(1);
@@ -1458,7 +1461,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
}
ret = nvif_object_map_handle(&mem->mem.object,
- &argc, argc,
+ &args, argc,
&handle, &length);
if (ret != 1)
return ret ? ret : -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8d4a5be..56fe261 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -152,9 +152,9 @@ nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
work->cli = cli;
mutex_lock(&cli->lock);
list_add_tail(&work->head, &cli->worker);
- mutex_unlock(&cli->lock);
if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence))
nouveau_cli_work_fence(fence, &work->cb);
+ mutex_unlock(&cli->lock);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 3331e82..96f6bd8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -157,8 +157,8 @@ struct nouveau_drm {
struct nvif_object copy;
int mtrr;
int type_vram;
- int type_host;
- int type_ncoh;
+ int type_host[2];
+ int type_ncoh[2];
} ttm;
/* GEM interface support */
@@ -217,6 +217,13 @@ nouveau_drm(struct drm_device *dev)
return dev->dev_private;
}
+static inline bool
+nouveau_drm_use_coherent_gpu_mapping(struct nouveau_drm *drm)
+{
+ struct nvif_mmu *mmu = &drm->client.mmu;
+ return !(mmu->type[drm->ttm.type_host[0]].type & NVIF_MEM_UNCACHED);
+}
+
int nouveau_pmops_suspend(struct device *);
int nouveau_pmops_resume(struct device *);
bool nouveau_pmops_runtime(void);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index c533d8e..be7357b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -429,7 +429,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
drm_fb_helper_unregister_fbi(&fbcon->helper);
drm_fb_helper_fini(&fbcon->helper);
- if (nouveau_fb->nvbo) {
+ if (nouveau_fb && nouveau_fb->nvbo) {
nouveau_vma_del(&nouveau_fb->vma);
nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_unpin(nouveau_fb->nvbo);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 589a962..c002f89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -103,10 +103,10 @@ nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
u8 type;
int ret;
- if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
- type = drm->ttm.type_ncoh;
+ if (!nouveau_drm_use_coherent_gpu_mapping(drm))
+ type = drm->ttm.type_ncoh[!!mem->kind];
else
- type = drm->ttm.type_host;
+ type = drm->ttm.type_host[0];
if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))
mem->comp = mem->kind = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 08b974b..dff51a0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -235,27 +235,46 @@ nouveau_ttm_global_release(struct nouveau_drm *drm)
drm->ttm.mem_global_ref.release = NULL;
}
-int
-nouveau_ttm_init(struct nouveau_drm *drm)
+static int
+nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
- struct nvkm_device *device = nvxx_device(&drm->client.device);
- struct nvkm_pci *pci = device->pci;
struct nvif_mmu *mmu = &drm->client.mmu;
- struct drm_device *dev = drm->dev;
- int typei, ret;
+ int typei;
typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
- NVIF_MEM_COHERENT);
+ kind | NVIF_MEM_COHERENT);
if (typei < 0)
return -ENOSYS;
- drm->ttm.type_host = typei;
+ drm->ttm.type_host[!!kind] = typei;
- typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE);
+ typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
if (typei < 0)
return -ENOSYS;
- drm->ttm.type_ncoh = typei;
+ drm->ttm.type_ncoh[!!kind] = typei;
+ return 0;
+}
+
+int
+nouveau_ttm_init(struct nouveau_drm *drm)
+{
+ struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_pci *pci = device->pci;
+ struct nvif_mmu *mmu = &drm->client.mmu;
+ struct drm_device *dev = drm->dev;
+ int typei, ret;
+
+ ret = nouveau_ttm_init_host(drm, 0);
+ if (ret)
+ return ret;
+
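+ /* where supported, also set up kind-tagged host memory types */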
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+ drm->client.device.info.chipset != 0x50) {
+ ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
+ if (ret)
+ return ret;
+ }
if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index 9e2628d..f5371d9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -67,8 +67,8 @@ nouveau_vma_del(struct nouveau_vma **pvma)
nvif_vmm_put(&vma->vmm->vmm, &tmp);
}
list_del(&vma->head);
- *pvma = NULL;
kfree(*pvma);
+ *pvma = NULL;
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index e146436..08e77cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1251,7 +1251,7 @@ nvaa_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = g98_mc_new,
- .mmu = g84_mmu_new,
+ .mmu = mcp77_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.therm = g84_therm_new,
@@ -1283,7 +1283,7 @@ nvac_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = g98_mc_new,
- .mmu = g84_mmu_new,
+ .mmu = mcp77_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.therm = g84_therm_new,
@@ -2369,7 +2369,7 @@ nv13b_chipset = {
.imem = gk20a_instmem_new,
.ltc = gp100_ltc_new,
.mc = gp10b_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gp10b_mmu_new,
.secboot = gp10b_secboot_new,
.pmu = gm20b_pmu_new,
.timer = gk20a_timer_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index a2978a3..700fc75 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -174,6 +174,7 @@ gf119_sor = {
.links = gf119_sor_dp_links,
.power = g94_sor_dp_power,
.pattern = gf119_sor_dp_pattern,
+ .drive = gf119_sor_dp_drive,
.vcpi = gf119_sor_dp_vcpi,
.audio = gf119_sor_dp_audio,
.audio_sym = gf119_sor_dp_audio_sym,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index 9646ade..243f0a5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -73,7 +73,8 @@ static int
nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_bar *bar = nvkm_bar(subdev);
- bar->func->bar1.fini(bar);
+ if (bar->func->bar1.fini)
+ bar->func->bar1.fini(bar);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
index b10077d..35878fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
@@ -26,7 +26,6 @@ gk20a_bar_func = {
.dtor = gf100_bar_dtor,
.oneinit = gf100_bar_oneinit,
.bar1.init = gf100_bar_bar1_init,
- .bar1.fini = gf100_bar_bar1_fini,
.bar1.wait = gf100_bar_bar1_wait,
.bar1.vmm = gf100_bar_bar1_vmm,
.flush = g84_bar_flush,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index 972370e..7c7efa4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -36,6 +36,7 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
if (data) {
*ver = nvbios_rd08(bios, data + 0x00);
switch (*ver) {
+ case 0x20:
case 0x21:
case 0x30:
case 0x40:
@@ -63,6 +64,7 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx,
if (data && idx < *cnt) {
u16 outp = nvbios_rd16(bios, data + *hdr + idx * *len);
switch (*ver * !!outp) {
+ case 0x20:
case 0x21:
case 0x30:
*hdr = nvbios_rd08(bios, data + 0x04);
@@ -96,12 +98,16 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
info->type = nvbios_rd16(bios, data + 0x00);
info->mask = nvbios_rd16(bios, data + 0x02);
switch (*ver) {
+ case 0x20:
+ info->mask |= 0x00c0; /* match any link */
+ /* fall-through */
case 0x21:
case 0x30:
info->flags = nvbios_rd08(bios, data + 0x05);
info->script[0] = nvbios_rd16(bios, data + 0x06);
info->script[1] = nvbios_rd16(bios, data + 0x08);
- info->lnkcmp = nvbios_rd16(bios, data + 0x0a);
+ if (*len >= 0x0c)
+ info->lnkcmp = nvbios_rd16(bios, data + 0x0a);
if (*len >= 0x0f) {
info->script[2] = nvbios_rd16(bios, data + 0x0c);
info->script[3] = nvbios_rd16(bios, data + 0x0e);
@@ -170,6 +176,7 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
memset(info, 0x00, sizeof(*info));
if (data) {
switch (*ver) {
+ case 0x20:
case 0x21:
info->dc = nvbios_rd08(bios, data + 0x02);
info->pe = nvbios_rd08(bios, data + 0x03);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 1ba7289..db48a1d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -249,7 +249,7 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
iobj->base.memory.ptrs = &nv50_instobj_fast;
else
iobj->base.memory.ptrs = &nv50_instobj_slow;
- refcount_inc(&iobj->maps);
+ refcount_set(&iobj->maps, 1);
}
mutex_unlock(&imem->subdev.mutex);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 352a65f..67ee983 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -4,6 +4,7 @@ nvkm-y += nvkm/subdev/mmu/nv41.o
nvkm-y += nvkm/subdev/mmu/nv44.o
nvkm-y += nvkm/subdev/mmu/nv50.o
nvkm-y += nvkm/subdev/mmu/g84.o
+nvkm-y += nvkm/subdev/mmu/mcp77.o
nvkm-y += nvkm/subdev/mmu/gf100.o
nvkm-y += nvkm/subdev/mmu/gk104.o
nvkm-y += nvkm/subdev/mmu/gk20a.o
@@ -22,6 +23,7 @@ nvkm-y += nvkm/subdev/mmu/vmmnv04.o
nvkm-y += nvkm/subdev/mmu/vmmnv41.o
nvkm-y += nvkm/subdev/mmu/vmmnv44.o
nvkm-y += nvkm/subdev/mmu/vmmnv50.o
+nvkm-y += nvkm/subdev/mmu/vmmmcp77.o
nvkm-y += nvkm/subdev/mmu/vmmgf100.o
nvkm-y += nvkm/subdev/mmu/vmmgk104.o
nvkm-y += nvkm/subdev/mmu/vmmgk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
new file mode 100644
index 0000000..0527b50
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+mcp77_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, mcp77_vmm_new, false, 0x0200 },
+ .kind = nv50_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+mcp77_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&mcp77_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index 6d8f61e..da06e64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -95,6 +95,9 @@ struct nvkm_vmm_desc {
const struct nvkm_vmm_desc_func *func;
};
+extern const struct nvkm_vmm_desc nv50_vmm_desc_12[];
+extern const struct nvkm_vmm_desc nv50_vmm_desc_16[];
+
extern const struct nvkm_vmm_desc gk104_vmm_desc_16_12[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_16_16[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_17_12[];
@@ -169,6 +172,11 @@ int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
const char *, struct nvkm_vmm **);
int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+int nv50_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+void nv50_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
+int nv50_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+void nv50_vmm_flush(struct nvkm_vmm *, int);
+
int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
struct nvkm_mmu *, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
@@ -200,6 +208,8 @@ int nv44_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv50_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
+int mcp77_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
int g84_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
int gf100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
new file mode 100644
index 0000000..e63d984
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+static const struct nvkm_vmm_func
+mcp77_vmm = {
+ .join = nv50_vmm_join,
+ .part = nv50_vmm_part,
+ .valid = nv50_vmm_valid,
+ .flush = nv50_vmm_flush,
+ .page_block = 1 << 29,
+ .page = {
+ { 16, &nv50_vmm_desc_16[0], NVKM_VMM_PAGE_xVxx },
+ { 12, &nv50_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
+ {}
+ }
+};
+
+int
+mcp77_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return nv04_vmm_new_(&mcp77_vmm, mmu, 0, addr, size,
+ argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
index 863a2ed..64f75d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -32,7 +32,7 @@ static inline void
nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
- u64 next = addr | map->type, data;
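+ /* map->type carries an incrementing comptag, so add it rather than OR */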
+ u64 next = addr + map->type, data;
u32 pten;
int log2blk;
@@ -69,7 +69,7 @@ nv50_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
nvkm_kmap(pt->memory);
while (ptes--) {
- const u64 data = *map->dma++ | map->type;
+ const u64 data = *map->dma++ + map->type;
VMM_WO064(pt, vmm, ptei++ * 8, data);
map->type += map->ctag;
}
@@ -163,21 +163,21 @@ nv50_vmm_pgd = {
.pde = nv50_vmm_pgd_pde,
};
-static const struct nvkm_vmm_desc
+const struct nvkm_vmm_desc
nv50_vmm_desc_12[] = {
{ PGT, 17, 8, 0x1000, &nv50_vmm_pgt },
{ PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
{}
};
-static const struct nvkm_vmm_desc
+const struct nvkm_vmm_desc
nv50_vmm_desc_16[] = {
{ PGT, 13, 8, 0x1000, &nv50_vmm_pgt },
{ PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
{}
};
-static void
+void
nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
{
struct nvkm_subdev *subdev = &vmm->mmu->subdev;
@@ -223,7 +223,7 @@ nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
mutex_unlock(&subdev->mutex);
}
-static int
+int
nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
struct nvkm_vmm_map *map)
{
@@ -321,7 +321,7 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return 0;
}
-static void
+void
nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
struct nvkm_vmm_join *join;
@@ -335,7 +335,7 @@ nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
}
}
-static int
+int
nv50_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
const u32 pd_offset = vmm->mmu->func->vmm.pd_offset;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index b1b1f36..ee2431a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -71,6 +71,10 @@ nvkm_pci_intr(int irq, void *arg)
struct nvkm_pci *pci = arg;
struct nvkm_device *device = pci->subdev.device;
bool handled = false;
+
+ if (pci->irq < 0)
+ return IRQ_HANDLED;
+
nvkm_mc_intr_unarm(device);
if (pci->msi)
pci->func->msi_rearm(pci);
@@ -84,11 +88,6 @@ nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_pci *pci = nvkm_pci(subdev);
- if (pci->irq >= 0) {
- free_irq(pci->irq, pci);
- pci->irq = -1;
- }
-
if (pci->agp.bridge)
nvkm_agp_fini(pci);
@@ -108,8 +107,20 @@ static int
nvkm_pci_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_pci *pci = nvkm_pci(subdev);
- if (pci_is_pcie(pci->pdev))
- return nvkm_pcie_oneinit(pci);
+ struct pci_dev *pdev = pci->pdev;
+ int ret;
+
+ if (pci_is_pcie(pci->pdev)) {
+ ret = nvkm_pcie_oneinit(pci);
+ if (ret)
+ return ret;
+ }
+
+ ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
+ if (ret)
+ return ret;
+
+ pci->irq = pdev->irq;
return 0;
}
@@ -117,7 +128,6 @@ static int
nvkm_pci_init(struct nvkm_subdev *subdev)
{
struct nvkm_pci *pci = nvkm_pci(subdev);
- struct pci_dev *pdev = pci->pdev;
int ret;
if (pci->agp.bridge) {
@@ -131,21 +141,34 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
if (pci->func->init)
pci->func->init(pci);
- ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
- if (ret)
- return ret;
+ /* Ensure MSI interrupts are armed, for the case where there are
+ * already interrupts pending (for whatever reason) at load time.
+ */
+ if (pci->msi)
+ pci->func->msi_rearm(pci);
- pci->irq = pdev->irq;
- return ret;
+ return 0;
}
static void *
nvkm_pci_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_pci *pci = nvkm_pci(subdev);
+
nvkm_agp_dtor(pci);
+
+ if (pci->irq >= 0) {
+	if (pci->irq >= 0) {
+		/* free_irq() will call the handler; we use pci->irq == -1
+		 * to signal that it has been torn down and should be a no-op.
+		 */
+ int irq = pci->irq;
+ pci->irq = -1;
+ free_irq(irq, pci);
+ }
+
if (pci->msi)
pci_disable_msi(pci->pdev);
+
return nvkm_pci(subdev);
}
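
The reordering above closes a teardown race: the handler now bails out when pci->irq is negative, and the destructor publishes pci->irq = -1 before calling free_irq(), so a handler invocation during teardown of the shared line does nothing. A standalone sketch of that ordering, with plain function calls standing in for real interrupts:

    #include <stdio.h>

    static int irq = 42;   /* >= 0 while the handler may do real work */

    static void handler(void)
    {
            if (irq < 0) {   /* torn down: bail out immediately */
                    puts("handler: no-op after teardown");
                    return;
            }
            puts("handler: real work");
    }

    static void teardown(void)
    {
            irq = -1;    /* publish the marker first ... */
            handler();   /* ... so a call during teardown is harmless */
    }

    int main(void)
    {
            handler();
            teardown();
            return 0;
    }
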
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
index e626edd..23db74a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
@@ -78,6 +78,8 @@ static void hdmi_cec_received_msg(struct hdmi_core_data *core)
/* then read the message */
msg.len = cnt & 0xf;
+ if (msg.len > CEC_MAX_MSG_SIZE - 2)
+ msg.len = CEC_MAX_MSG_SIZE - 2;
msg.msg[0] = hdmi_read_reg(core->base,
HDMI_CEC_RX_CMD_HEADER);
msg.msg[1] = hdmi_read_reg(core->base,
@@ -104,26 +106,6 @@ static void hdmi_cec_received_msg(struct hdmi_core_data *core)
}
}
-static void hdmi_cec_transmit_fifo_empty(struct hdmi_core_data *core, u32 stat1)
-{
- if (stat1 & 2) {
- u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
-
- cec_transmit_done(core->adap,
- CEC_TX_STATUS_NACK |
- CEC_TX_STATUS_MAX_RETRIES,
- 0, (dbg3 >> 4) & 7, 0, 0);
- } else if (stat1 & 1) {
- cec_transmit_done(core->adap,
- CEC_TX_STATUS_ARB_LOST |
- CEC_TX_STATUS_MAX_RETRIES,
- 0, 0, 0, 0);
- } else if (stat1 == 0) {
- cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
- 0, 0, 0, 0);
- }
-}
-
void hdmi4_cec_irq(struct hdmi_core_data *core)
{
u32 stat0 = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_0);
@@ -132,27 +114,21 @@ void hdmi4_cec_irq(struct hdmi_core_data *core)
hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0, stat0);
hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, stat1);
- if (stat0 & 0x40)
+ if (stat0 & 0x20) {
+ cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
+ 0, 0, 0, 0);
REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
- else if (stat0 & 0x24)
- hdmi_cec_transmit_fifo_empty(core, stat1);
- if (stat1 & 2) {
+ } else if (stat1 & 0x02) {
u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
cec_transmit_done(core->adap,
CEC_TX_STATUS_NACK |
CEC_TX_STATUS_MAX_RETRIES,
0, (dbg3 >> 4) & 7, 0, 0);
- } else if (stat1 & 1) {
- cec_transmit_done(core->adap,
- CEC_TX_STATUS_ARB_LOST |
- CEC_TX_STATUS_MAX_RETRIES,
- 0, 0, 0, 0);
+ REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
}
if (stat0 & 0x02)
hdmi_cec_received_msg(core);
- if (stat1 & 0x3)
- REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
}
static bool hdmi_cec_clear_tx_fifo(struct cec_adapter *adap)
@@ -231,18 +207,14 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
/*
* Enable CEC interrupts:
* Transmit Buffer Full/Empty Change event
- * Transmitter FIFO Empty event
* Receiver FIFO Not Empty event
*/
- hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x26);
+ hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x22);
/*
* Enable CEC interrupts:
- * RX FIFO Overrun Error event
- * Short Pulse Detected event
* Frame Retransmit Count Exceeded event
- * Start Bit Irregularity event
*/
- hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x0f);
+ hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x02);
/* cec calibration enable (self clearing) */
hdmi_write_reg(core->base, HDMI_CEC_SETUP, 0x03);
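
The clamp added to hdmi_cec_received_msg() keeps the two header bytes plus the hardware-reported payload inside the fixed-size CEC message buffer. A standalone sketch of the bound, assuming the CEC core's CEC_MAX_MSG_SIZE of 16:

    #include <stdio.h>

    #define CEC_MAX_MSG_SIZE 16

    static unsigned int clamp_payload(unsigned int cnt)
    {
            unsigned int len = cnt & 0xf;           /* payload count from hw */

            if (len > CEC_MAX_MSG_SIZE - 2)         /* leave room for header */
                    len = CEC_MAX_MSG_SIZE - 2;
            return len;
    }

    int main(void)
    {
            printf("%u %u\n", clamp_payload(0x05), clamp_payload(0x0f));
            /* prints: 5 14 */
            return 0;
    }
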
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index dda904e..500b6fb 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -175,11 +175,31 @@ static void sun4i_hdmi_mode_set(struct drm_encoder *encoder,
writel(val, hdmi->base + SUN4I_HDMI_VID_TIMING_POL_REG);
}
+static enum drm_mode_status sun4i_hdmi_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
+ unsigned long rate = mode->clock * 1000;
+ unsigned long diff = rate / 200; /* +-0.5% allowed by HDMI spec */
+ long rounded_rate;
+
+ /* 165 MHz is the typical max pixelclock frequency for HDMI <= 1.2 */
+ if (rate > 165000000)
+ return MODE_CLOCK_HIGH;
+ rounded_rate = clk_round_rate(hdmi->tmds_clk, rate);
+ if (rounded_rate > 0 &&
+ max_t(unsigned long, rounded_rate, rate) -
+ min_t(unsigned long, rounded_rate, rate) < diff)
+ return MODE_OK;
+ return MODE_NOCLOCK;
+}
+
static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = {
.atomic_check = sun4i_hdmi_atomic_check,
.disable = sun4i_hdmi_disable,
.enable = sun4i_hdmi_enable,
.mode_set = sun4i_hdmi_mode_set,
+ .mode_valid = sun4i_hdmi_mode_valid,
};
static const struct drm_encoder_funcs sun4i_hdmi_funcs = {
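
The new mode_valid hook rejects modes whose pixel clock the TMDS clock tree cannot hit within the +-0.5% window the HDMI spec allows (diff = rate / 200). A worked standalone example with clk_round_rate() stubbed out; the stub's return value is purely illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    static long clk_round_rate_stub(unsigned long rate)
    {
            return (long)rate - 300000;   /* pretend the PLL lands 300 kHz low */
    }

    int main(void)
    {
            unsigned long rate = 148500000;     /* 1080p60 pixel clock, in Hz */
            unsigned long diff = rate / 200;    /* 742500 Hz, i.e. 0.5% */
            long rounded = clk_round_rate_stub(rate);

            if (rounded > 0 && labs(rounded - (long)rate) < (long)diff)
                    puts("MODE_OK");            /* 300 kHz off < 742.5 kHz */
            else
                    puts("MODE_NOCLOCK");
            return 0;
    }
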
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
index dc332ea..3ecffa5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
@@ -102,10 +102,13 @@ static int sun4i_tmds_determine_rate(struct clk_hw *hw,
goto out;
}
- if (abs(rate - rounded / i) <
- abs(rate - best_parent / best_div)) {
+ if (!best_parent ||
+ abs(rate - rounded / i / j) <
+ abs(rate - best_parent / best_half /
+ best_div)) {
best_parent = rounded;
- best_div = i;
+ best_half = i;
+ best_div = j;
}
}
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index e122f5b..f4284b5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -724,12 +724,12 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
if (IS_ERR(tcon->crtc)) {
dev_err(dev, "Couldn't create our CRTC\n");
ret = PTR_ERR(tcon->crtc);
- goto err_free_clocks;
+ goto err_free_dotclock;
}
ret = sun4i_rgb_init(drm, tcon);
if (ret < 0)
- goto err_free_clocks;
+ goto err_free_dotclock;
if (tcon->quirks->needs_de_be_mux) {
/*
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index b0a1ded..476079f 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2656,6 +2656,9 @@ static int tegra_sor_probe(struct platform_device *pdev)
name, err);
goto remove;
}
+ } else {
+ /* fall back to the module clock on SOR0 (eDP/LVDS only) */
+ sor->clk_out = sor->clk;
}
sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 44343a2..5d252fb 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -455,6 +455,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
freed += (nr_free_pool - shrink_pages) << pool->order;
if (freed >= sc->nr_to_scan)
break;
+ shrink_pages <<= pool->order;
}
mutex_unlock(&lock);
return freed;
@@ -543,7 +544,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
int r = 0;
unsigned i, j, cpages;
unsigned npages = 1 << order;
- unsigned max_cpages = min(count, (unsigned)NUM_PAGES_TO_ALLOC);
+ unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
/* allocate array for page caching change */
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
@@ -1006,6 +1007,8 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
pr_info("Initializing pool allocator\n");
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+ if (!_manager)
+ return -ENOMEM;
ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);
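
The one-line shrinker fix keeps the units consistent: each entry in a pool of the given order covers 1 << order basic pages, and freed is already counted in basic pages, so the leftover budget in shrink_pages must be shifted the same way before the next pool is visited. The arithmetic, with illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int order = 2;                 /* 4 basic pages per entry */
            unsigned long nr_free_pool = 10;        /* entries this pool held */
            unsigned long shrink_pages = 3;         /* entries left for later */
            unsigned long freed;

            freed = (nr_free_pool - shrink_pages) << order; /* 7 entries = 28 pages */
            shrink_pages <<= order;                 /* 3 entries = 12 basic pages */

            printf("freed=%lu leftover=%lu\n", freed, shrink_pages);
            return 0;
    }
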
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 6c32c89..c94cce9 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -146,7 +146,7 @@ vc4_save_hang_state(struct drm_device *dev)
struct vc4_exec_info *exec[2];
struct vc4_bo *bo;
unsigned long irqflags;
- unsigned int i, j, unref_list_count, prev_idx;
+ unsigned int i, j, k, unref_list_count;
kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
if (!kernel_state)
@@ -182,7 +182,7 @@ vc4_save_hang_state(struct drm_device *dev)
return;
}
- prev_idx = 0;
+ k = 0;
for (i = 0; i < 2; i++) {
if (!exec[i])
continue;
@@ -197,7 +197,7 @@ vc4_save_hang_state(struct drm_device *dev)
WARN_ON(!refcount_read(&bo->usecnt));
refcount_inc(&bo->usecnt);
drm_gem_object_get(&exec[i]->bo[j]->base);
- kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
+ kernel_state->bo[k++] = &exec[i]->bo[j]->base;
}
list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
@@ -205,12 +205,12 @@ vc4_save_hang_state(struct drm_device *dev)
* because they are naturally unpurgeable.
*/
drm_gem_object_get(&bo->base.base);
- kernel_state->bo[j + prev_idx] = &bo->base.base;
- j++;
+ kernel_state->bo[k++] = &bo->base.base;
}
- prev_idx = j + 1;
}
+ WARN_ON_ONCE(k != state->bo_count);
+
if (exec[0])
state->start_bin = exec[0]->ct0ca;
if (exec[1])
@@ -436,6 +436,19 @@ vc4_flush_caches(struct drm_device *dev)
VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}
+static void
+vc4_flush_texture_caches(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ V3D_WRITE(V3D_L2CACTL,
+ V3D_L2CACTL_L2CCLR);
+
+ V3D_WRITE(V3D_SLCACTL,
+ VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
+ VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
+}
+
/* Sets the registers for the next job to be actually be executed in
* the hardware.
*
@@ -474,6 +487,14 @@ vc4_submit_next_render_job(struct drm_device *dev)
if (!exec)
return;
+ /* A previous RCL may have written to one of our textures, and
+ * our full cache flush at bin time may have occurred before
+ * that RCL completed. Flush the texture cache now, but not
+ * the instructions or uniforms (since we don't write those
+ * from an RCL).
+ */
+ vc4_flush_texture_caches(dev);
+
submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}
@@ -888,8 +909,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
/* If we got force-completed because of GPU reset rather than
* through our IRQ handler, signal the fence now.
*/
- if (exec->fence)
+ if (exec->fence) {
dma_fence_signal(exec->fence);
+ dma_fence_put(exec->fence);
+ }
if (exec->bo) {
for (i = 0; i < exec->bo_count; i++) {
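
The hang-state change replaces the prev_idx bookkeeping, which advanced by j + 1 and could leave gaps or run past the allocation, with a single cursor k that packs kernel_state->bo[] densely; the WARN_ON_ONCE then checks the cursor against the precomputed total. A standalone demo of the same packing:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            int list_a[] = { 1, 2, 3 };
            int list_b[] = { 4, 5 };
            int out[5];
            unsigned int k = 0;

            for (unsigned int j = 0; j < 3; j++)
                    out[k++] = list_a[j];
            for (unsigned int j = 0; j < 2; j++)
                    out[k++] = list_b[j];

            assert(k == 5);         /* mirrors WARN_ON_ONCE(k != bo_count) */
            printf("packed %u entries, last = %d\n", k, out[k - 1]);
            return 0;
    }
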
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 61b2e53..3dd62d7 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -139,6 +139,7 @@ vc4_irq_finish_render_job(struct drm_device *dev)
list_move_tail(&exec->head, &vc4->job_done_list);
if (exec->fence) {
dma_fence_signal_locked(exec->fence);
+ dma_fence_put(exec->fence);
exec->fence = NULL;
}
vc4_submit_next_render_job(dev);
@@ -208,9 +209,6 @@ vc4_irq_postinstall(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- /* Undo the effects of a previous vc4_irq_uninstall. */
- enable_irq(dev->irq);
-
/* Enable both the render done and out of memory interrupts. */
V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 622cd43..493f392b 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -327,6 +327,9 @@ static int vc4_v3d_runtime_resume(struct device *dev)
return ret;
vc4_v3d_init_hw(vc4->dev);
+
+ /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
+ enable_irq(vc4->dev->irq);
vc4_irq_postinstall(vc4->dev);
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 21c62a3..87e8af5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2731,6 +2731,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
}
view_type = vmw_view_cmd_to_type(header->id);
+ if (view_type == vmw_view_max)
+ return -EINVAL;
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 0545740..fcd5814 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -697,7 +697,6 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
vps->pinned = 0;
/* Mapping is managed by prepare_fb/cleanup_fb */
- memset(&vps->guest_map, 0, sizeof(vps->guest_map));
memset(&vps->host_map, 0, sizeof(vps->host_map));
vps->cpp = 0;
@@ -760,11 +759,6 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
/* Should have been freed by cleanup_fb */
- if (vps->guest_map.virtual) {
- DRM_ERROR("Guest mapping not freed\n");
- ttm_bo_kunmap(&vps->guest_map);
- }
-
if (vps->host_map.virtual) {
DRM_ERROR("Host mapping not freed\n");
ttm_bo_kunmap(&vps->host_map);
@@ -1869,7 +1863,7 @@ u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
*/
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
- return -ENOSYS;
+ return -EINVAL;
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index ff9c838..cd9da2d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -175,7 +175,7 @@ struct vmw_plane_state {
int pinned;
/* For CPU Blit */
- struct ttm_bo_kmap_obj host_map, guest_map;
+ struct ttm_bo_kmap_obj host_map;
unsigned int cpp;
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index b8a0980..3824595 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -266,8 +266,8 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
.set_property = vmw_du_connector_set_property,
.destroy = vmw_ldu_connector_destroy,
.reset = vmw_du_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = vmw_du_connector_duplicate_state,
+ .atomic_destroy_state = vmw_du_connector_destroy_state,
.atomic_set_property = vmw_du_connector_atomic_set_property,
.atomic_get_property = vmw_du_connector_atomic_get_property,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index bc5f602..63a4cd7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -420,8 +420,8 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
.set_property = vmw_du_connector_set_property,
.destroy = vmw_sou_connector_destroy,
.reset = vmw_du_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = vmw_du_connector_duplicate_state,
+ .atomic_destroy_state = vmw_du_connector_destroy_state,
.atomic_set_property = vmw_du_connector_atomic_set_property,
.atomic_get_property = vmw_du_connector_atomic_get_property,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 90b5437..b68d748 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -114,7 +114,7 @@ struct vmw_screen_target_display_unit {
bool defined;
/* For CPU Blit */
- struct ttm_bo_kmap_obj host_map, guest_map;
+ struct ttm_bo_kmap_obj host_map;
unsigned int cpp;
};
@@ -695,7 +695,8 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
s32 src_pitch, dst_pitch;
u8 *src, *dst;
bool not_used;
-
+ struct ttm_bo_kmap_obj guest_map;
+ int ret;
if (!dirty->num_hits)
return;
@@ -706,6 +707,13 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
if (width == 0 || height == 0)
return;
+ ret = ttm_bo_kmap(&ddirty->buf->base, 0, ddirty->buf->base.num_pages,
+ &guest_map);
+ if (ret) {
+ DRM_ERROR("Failed mapping framebuffer for blit: %d\n",
+ ret);
+ goto out_cleanup;
+ }
/* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
@@ -713,7 +721,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
dst_pitch = ddirty->pitch;
- dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used);
+ dst = ttm_kmap_obj_virtual(&guest_map, &not_used);
dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
@@ -772,6 +780,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
+ ttm_bo_kunmap(&guest_map);
out_cleanup:
ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
ddirty->right = ddirty->bottom = S32_MIN;
@@ -1109,9 +1118,6 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
{
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
- if (vps->guest_map.virtual)
- ttm_bo_kunmap(&vps->guest_map);
-
if (vps->host_map.virtual)
ttm_bo_kunmap(&vps->host_map);
@@ -1277,33 +1283,11 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
*/
if (vps->content_fb_type == SEPARATE_DMA &&
!(dev_priv->capabilities & SVGA_CAP_3D)) {
-
- struct vmw_framebuffer_dmabuf *new_vfbd;
-
- new_vfbd = vmw_framebuffer_to_vfbd(new_fb);
-
- ret = ttm_bo_reserve(&new_vfbd->buffer->base, false, false,
- NULL);
- if (ret)
- goto out_srf_unpin;
-
- ret = ttm_bo_kmap(&new_vfbd->buffer->base, 0,
- new_vfbd->buffer->base.num_pages,
- &vps->guest_map);
-
- ttm_bo_unreserve(&new_vfbd->buffer->base);
-
- if (ret) {
- DRM_ERROR("Failed to map content buffer to CPU\n");
- goto out_srf_unpin;
- }
-
ret = ttm_bo_kmap(&vps->surf->res.backup->base, 0,
vps->surf->res.backup->base.num_pages,
&vps->host_map);
if (ret) {
DRM_ERROR("Failed to map display buffer to CPU\n");
- ttm_bo_kunmap(&vps->guest_map);
goto out_srf_unpin;
}
@@ -1350,7 +1334,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
stdu->display_srf = vps->surf;
stdu->content_fb_type = vps->content_fb_type;
stdu->cpp = vps->cpp;
- memcpy(&stdu->guest_map, &vps->guest_map, sizeof(vps->guest_map));
memcpy(&stdu->host_map, &vps->host_map, sizeof(vps->host_map));
if (!stdu->defined)
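
The vmwgfx changes drop the guest_map cached in plane state and instead map the buffer around each CPU blit, unmapping on every exit path. A standalone sketch of that scoped map/use/unmap shape; map_buffer() and blit() are stand-ins, not TTM calls:

    #include <stdio.h>
    #include <stdlib.h>

    static void *map_buffer(void)      { return malloc(4096); }
    static void unmap_buffer(void *p)  { free(p); puts("unmapped"); }
    static int blit(void *dst)         { return dst ? 0 : -1; }

    static void commit(void)
    {
            void *guest_map = map_buffer();

            if (!guest_map) {
                    puts("map failed, nothing to undo");
                    return;
            }
            if (blit(guest_map))
                    puts("blit failed");

            unmap_buffer(guest_map);   /* always paired with the map */
    }

    int main(void)
    {
            commit();
            return 0;
    }
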
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f3fcb83..0c3f608 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -551,7 +551,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
break;
default:
- hid_err(parser->device, "unknown main item tag 0x%x\n", item->tag);
+ hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
ret = 0;
}
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 68cdc96..271f314 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -696,8 +696,16 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
(u8 *)&word, 2);
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
- size = I2C_SMBUS_BLOCK_DATA;
- /* fallthrough */
+ if (read_write == I2C_SMBUS_READ) {
+ read_length = data->block[0];
+ count = cp2112_write_read_req(buf, addr, read_length,
+ command, NULL, 0);
+ } else {
+ count = cp2112_write_req(buf, addr, command,
+ data->block + 1,
+ data->block[0]);
+ }
+ break;
case I2C_SMBUS_BLOCK_DATA:
if (I2C_SMBUS_READ == read_write) {
count = cp2112_write_read_req(buf, addr,
@@ -785,6 +793,9 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
case I2C_SMBUS_WORD_DATA:
data->word = le16_to_cpup((__le16 *)buf);
break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ memcpy(data->block + 1, buf, read_length);
+ break;
case I2C_SMBUS_BLOCK_DATA:
if (read_length > I2C_SMBUS_BLOCK_MAX) {
ret = -EPROTO;
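
The cp2112 fix stops I2C_SMBUS_I2C_BLOCK_DATA from falling through into the SMBus block path and handles it directly, using the standard buffer convention where data->block[0] carries the byte count and the payload starts at data->block[1]. A standalone sketch of that layout (not the CP2112 wire format):

    #include <stdio.h>
    #include <string.h>

    #define I2C_SMBUS_BLOCK_MAX 32

    int main(void)
    {
            unsigned char block[I2C_SMBUS_BLOCK_MAX + 1];
            const unsigned char payload[] = { 0xde, 0xad, 0xbe, 0xef };

            block[0] = sizeof(payload);                  /* length prefix */
            memcpy(block + 1, payload, sizeof(payload)); /* data after it */

            for (unsigned int i = 0; i < block[0]; i++)
                    printf("%02x ", block[1 + i]);
            putchar('\n');
            return 0;
    }
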
diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c
index 9325545..edc0f64 100644
--- a/drivers/hid/hid-holtekff.c
+++ b/drivers/hid/hid-holtekff.c
@@ -32,10 +32,6 @@
#ifdef CONFIG_HOLTEK_FF
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
-MODULE_DESCRIPTION("Force feedback support for Holtek On Line Grip based devices");
-
/*
* These commands and parameters are currently known:
*
@@ -223,3 +219,7 @@ static struct hid_driver holtek_driver = {
.probe = holtek_probe,
};
module_hid_driver(holtek_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
+MODULE_DESCRIPTION("Force feedback support for Holtek On Line Grip based devices");
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 76ed9a2..610223f 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1378,6 +1378,8 @@ void vmbus_device_unregister(struct hv_device *device_obj)
pr_debug("child device %s unregistered\n",
dev_name(&device_obj->device));
+ kset_unregister(device_obj->channels_kset);
+
/*
* Kick off the process of unregistering the device.
* This will call vmbus_remove() and eventually vmbus_device_release()
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 7ad0176..ef23553 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -26,11 +26,9 @@ if HWMON
config HWMON_VID
tristate
- default n
config HWMON_DEBUG_CHIP
bool "Hardware Monitoring Chip debugging messages"
- default n
help
Say Y here if you want the I2C chip drivers to produce a bunch of
debug messages to the system log. Select this if you are having
@@ -42,7 +40,6 @@ comment "Native drivers"
config SENSORS_AB8500
tristate "AB8500 thermal monitoring"
depends on AB8500_GPADC && AB8500_BM
- default n
help
If you say yes here you get support for the thermal sensor part
of the AB8500 chip. The driver includes thermal management for
@@ -302,7 +299,6 @@ config SENSORS_APPLESMC
select NEW_LEDS
select LEDS_CLASS
select INPUT_POLLDEV
- default n
help
This driver provides support for the Apple System Management
Controller, which provides an accelerometer (Apple Sudden Motion
@@ -678,7 +674,6 @@ config SENSORS_JC42
config SENSORS_POWR1220
tristate "Lattice POWR1220 Power Monitoring"
depends on I2C
- default n
help
If you say yes here you get access to the hardware monitoring
functions of the Lattice POWR1220 isp Power Supply Monitoring,
@@ -702,7 +697,6 @@ config SENSORS_LTC2945
tristate "Linear Technology LTC2945"
depends on I2C
select REGMAP_I2C
- default n
help
If you say yes here you get support for Linear Technology LTC2945
I2C System Monitor.
@@ -727,7 +721,6 @@ config SENSORS_LTC2990
config SENSORS_LTC4151
tristate "Linear Technology LTC4151"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4151
High Voltage I2C Current and Voltage Monitor interface.
@@ -738,7 +731,6 @@ config SENSORS_LTC4151
config SENSORS_LTC4215
tristate "Linear Technology LTC4215"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4215
Hot Swap Controller I2C interface.
@@ -750,7 +742,6 @@ config SENSORS_LTC4222
tristate "Linear Technology LTC4222"
depends on I2C
select REGMAP_I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4222
Dual Hot Swap Controller I2C interface.
@@ -761,7 +752,6 @@ config SENSORS_LTC4222
config SENSORS_LTC4245
tristate "Linear Technology LTC4245"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4245
Multiple Supply Hot Swap Controller I2C interface.
@@ -773,7 +763,6 @@ config SENSORS_LTC4260
tristate "Linear Technology LTC4260"
depends on I2C
select REGMAP_I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4260
Positive Voltage Hot Swap Controller I2C interface.
@@ -784,7 +773,6 @@ config SENSORS_LTC4260
config SENSORS_LTC4261
tristate "Linear Technology LTC4261"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4261
Negative Voltage Hot Swap Controller I2C interface.
@@ -1276,7 +1264,6 @@ config SENSORS_NSA320
config SENSORS_PCF8591
tristate "Philips PCF8591 ADC/DAC"
depends on I2C
- default n
help
If you say yes here you get support for Philips PCF8591 4-channel
ADC, 1-channel DAC chips.
@@ -1459,7 +1446,6 @@ config SENSORS_SMSC47B397
config SENSORS_SCH56XX_COMMON
tristate
- default n
config SENSORS_SCH5627
tristate "SMSC SCH5627"
@@ -1505,7 +1491,6 @@ config SENSORS_STTS751
config SENSORS_SMM665
tristate "Summit Microelectronics SMM665"
depends on I2C
- default n
help
If you say yes here you get support for the hardware monitoring
features of the Summit Microelectronics SMM665/SMM665B Six-Channel
@@ -1725,6 +1710,16 @@ config SENSORS_VT8231
This driver can also be built as a module. If so, the module
will be called vt8231.
+config SENSORS_W83773G
+ tristate "Nuvoton W83773G"
+ depends on I2C
+ help
+ If you say yes here you get support for the Nuvoton W83773G hardware
+ monitoring chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called w83773g.
+
config SENSORS_W83781D
tristate "Winbond W83781D, W83782D, W83783S, Asus AS99127F"
depends on I2C
@@ -1782,7 +1777,6 @@ config SENSORS_W83795
config SENSORS_W83795_FANCTRL
bool "Include automatic fan control support (DANGEROUS)"
depends on SENSORS_W83795
- default n
help
If you say yes here, support for automatic fan speed control
will be included in the driver.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 0fe489f..f814b4a 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_SENSORS_ATK0110) += asus_atk0110.o
# asb100, then w83781d go first, as they can override other drivers' addresses.
obj-$(CONFIG_SENSORS_ASB100) += asb100.o
obj-$(CONFIG_SENSORS_W83627HF) += w83627hf.o
+obj-$(CONFIG_SENSORS_W83773G) += w83773g.o
obj-$(CONFIG_SENSORS_W83792D) += w83792d.o
obj-$(CONFIG_SENSORS_W83793) += w83793.o
obj-$(CONFIG_SENSORS_W83795) += w83795.o
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 63a95e2..693a3d5 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -19,6 +19,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/reset.h>
#include <linux/sysfs.h>
#include <linux/thermal.h>
@@ -181,6 +182,7 @@ struct aspeed_cooling_device {
struct aspeed_pwm_tacho_data {
struct regmap *regmap;
+ struct reset_control *rst;
unsigned long clk_freq;
bool pwm_present[8];
bool fan_tach_present[16];
@@ -905,6 +907,13 @@ static int aspeed_create_fan(struct device *dev,
return 0;
}
+static void aspeed_pwm_tacho_remove(void *data)
+{
+ struct aspeed_pwm_tacho_data *priv = data;
+
+ reset_control_assert(priv->rst);
+}
+
static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -931,6 +940,19 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
&aspeed_pwm_tacho_regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
+
+ priv->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ dev_err(dev,
+ "missing or invalid reset controller device tree entry");
+ return PTR_ERR(priv->rst);
+ }
+ reset_control_deassert(priv->rst);
+
+ ret = devm_add_action_or_reset(dev, aspeed_pwm_tacho_remove, priv);
+ if (ret)
+ return ret;
+
regmap_write(priv->regmap, ASPEED_PTCR_TACH_SOURCE, 0);
regmap_write(priv->regmap, ASPEED_PTCR_TACH_SOURCE_EXT, 0);
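
devm_add_action_or_reset() registers a callback that the driver core runs when the device is unbound, and runs it immediately if registration itself fails, which is how the reset line above gets re-asserted without a remove() hook. A tiny standalone model of that pattern:

    #include <stdio.h>

    typedef void (*action_fn)(void *);

    static struct { action_fn fn; void *data; } actions[8];
    static int n_actions;

    static int add_action(action_fn fn, void *data)
    {
            if (n_actions == 8) {
                    fn(data);       /* mimic *_or_reset(): undo right away */
                    return -1;
            }
            actions[n_actions].fn = fn;
            actions[n_actions++].data = data;
            return 0;
    }

    static void run_actions(void)   /* cleanups run in reverse order */
    {
            while (n_actions > 0) {
                    n_actions--;
                    actions[n_actions].fn(actions[n_actions].data);
            }
    }

    static void assert_reset(void *name)
    {
            printf("reset_control_assert(%s)\n", (char *)name);
    }

    int main(void)
    {
            add_action(assert_reset, "pwm-tacho");
            run_actions();          /* device teardown */
            return 0;
    }
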
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index c13a4fd..4bdbf77 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -246,7 +246,8 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
int err;
u32 eax, edx;
int i;
- struct pci_dev *host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+ u16 devfn = PCI_DEVFN(0, 0);
+ struct pci_dev *host_bridge = pci_get_domain_bus_and_slot(0, 0, devfn);
/*
* Explicit tjmax table entries override heuristics.
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index c7c9e95..bf3bb7e 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -76,6 +76,7 @@ static uint i8k_fan_mult = I8K_FAN_MULT;
static uint i8k_pwm_mult;
static uint i8k_fan_max = I8K_FAN_HIGH;
static bool disallow_fan_type_call;
+static bool disallow_fan_support;
#define I8K_HWMON_HAVE_TEMP1 (1 << 0)
#define I8K_HWMON_HAVE_TEMP2 (1 << 1)
@@ -242,6 +243,9 @@ static int i8k_get_fan_status(int fan)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_FAN, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
regs.ebx = fan & 0xff;
return i8k_smm(&regs) ? : regs.eax & 0xff;
}
@@ -253,6 +257,9 @@ static int i8k_get_fan_speed(int fan)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_SPEED, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
regs.ebx = fan & 0xff;
return i8k_smm(&regs) ? : (regs.eax & 0xffff) * i8k_fan_mult;
}
@@ -264,7 +271,7 @@ static int _i8k_get_fan_type(int fan)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
- if (disallow_fan_type_call)
+ if (disallow_fan_support || disallow_fan_type_call)
return -EINVAL;
regs.ebx = fan & 0xff;
@@ -289,6 +296,9 @@ static int i8k_get_fan_nominal_speed(int fan, int speed)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_NOM_SPEED, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
regs.ebx = (fan & 0xff) | (speed << 8);
return i8k_smm(&regs) ? : (regs.eax & 0xffff) * i8k_fan_mult;
}
@@ -300,6 +310,9 @@ static int i8k_set_fan(int fan, int speed)
{
struct smm_regs regs = { .eax = I8K_SMM_SET_FAN, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
speed = (speed < 0) ? 0 : ((speed > i8k_fan_max) ? i8k_fan_max : speed);
regs.ebx = (fan & 0xff) | (speed << 8);
@@ -772,6 +785,8 @@ static struct attribute *i8k_attrs[] = {
static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
int index)
{
+ if (disallow_fan_support && index >= 8)
+ return 0;
if (disallow_fan_type_call &&
(index == 9 || index == 12 || index == 15))
return 0;
@@ -1039,6 +1054,30 @@ static const struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initconst
};
/*
+ * On some machines, all fan-related SMM functions implemented by the Dell
+ * BIOS firmware freeze the kernel for about 500ms. Until Dell fixes these
+ * problems, fan support stays disabled on the blacklisted machines below.
+ * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=195751
+ */
+static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = {
+ {
+ .ident = "Dell Inspiron 7720",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 7720"),
+ },
+ },
+ {
+ .ident = "Dell Vostro 3360",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
+ },
+ },
+ { }
+};
+
+/*
* Probe for the presence of a supported laptop.
*/
static int __init i8k_probe(void)
@@ -1060,8 +1099,17 @@ static int __init i8k_probe(void)
i8k_get_dmi_data(DMI_BIOS_VERSION));
}
- if (dmi_check_system(i8k_blacklist_fan_type_dmi_table))
- disallow_fan_type_call = true;
+ if (dmi_check_system(i8k_blacklist_fan_support_dmi_table)) {
+ pr_warn("broken Dell BIOS detected, disallow fan support\n");
+ if (!force)
+ disallow_fan_support = true;
+ }
+
+ if (dmi_check_system(i8k_blacklist_fan_type_dmi_table)) {
+ pr_warn("broken Dell BIOS detected, disallow fan type call\n");
+ if (!force)
+ disallow_fan_type_call = true;
+ }
strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
sizeof(bios_version));
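
dmi_check_system() reports a hit when every .matches entry of some table row matches the running system's DMI strings, and i8k_probe() applies the result only when the force module parameter is unset. A simplified standalone model of that check:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct blacklist_entry { const char *vendor, *product; };

    static const struct blacklist_entry blacklist[] = {
            { "Dell Inc.", "Inspiron 7720" },
            { "Dell Inc.", "Vostro 3360" },
    };

    int main(void)
    {
            const char *vendor = "Dell Inc.", *product = "Vostro 3360";
            bool force = false, disallow_fan_support = false;

            for (size_t i = 0; i < sizeof(blacklist) / sizeof(blacklist[0]); i++) {
                    if (!strcmp(vendor, blacklist[i].vendor) &&
                        !strcmp(product, blacklist[i].product) && !force)
                            disallow_fan_support = true;
            }
            printf("fan support %s\n",
                   disallow_fan_support ? "disabled" : "enabled");
            return 0;
    }
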
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
index 7b73d20..0ae1ee1 100644
--- a/drivers/hwmon/hih6130.c
+++ b/drivers/hwmon/hih6130.c
@@ -37,7 +37,7 @@
/**
* struct hih6130 - HIH-6130 device specific data
- * @hwmon_dev: device registered with hwmon
+ * @client: pointer to I2C client device
* @lock: mutex to protect measurement values
* @valid: only false before first measurement is taken
* @last_update: time of last update (jiffies)
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index c9790e2..32083e4 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -143,6 +143,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
struct hwmon_device *hwdev, int index)
{
struct hwmon_thermal_data *tdata;
+ struct thermal_zone_device *tzd;
tdata = devm_kzalloc(dev, sizeof(*tdata), GFP_KERNEL);
if (!tdata)
@@ -151,8 +152,14 @@ static int hwmon_thermal_add_sensor(struct device *dev,
tdata->hwdev = hwdev;
tdata->index = index;
- devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
- &hwmon_thermal_ops);
+ tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
+ &hwmon_thermal_ops);
+ /*
+ * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
+ * so ignore that error but forward any other error.
+ */
+ if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV))
+ return PTR_ERR(tzd);
return 0;
}
@@ -621,14 +628,20 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
if (!chip->ops->is_visible(drvdata, hwmon_temp,
hwmon_temp_input, j))
continue;
- if (info[i]->config[j] & HWMON_T_INPUT)
- hwmon_thermal_add_sensor(dev, hwdev, j);
+ if (info[i]->config[j] & HWMON_T_INPUT) {
+ err = hwmon_thermal_add_sensor(dev,
+ hwdev, j);
+ if (err)
+ goto free_device;
+ }
}
}
}
return hdev;
+free_device:
+ device_unregister(hdev);
free_hwmon:
kfree(hwdev);
ida_remove:
@@ -665,7 +678,7 @@ EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
* @dev: the parent device
* @name: hwmon name attribute
* @drvdata: driver data to attach to created device
- * @info: pointer to hwmon chip information
+ * @chip: pointer to hwmon chip information
* @extra_groups: pointer to list of additional non-standard attribute groups
*
* hwmon_device_unregister() must be called when the device is no
@@ -772,11 +785,11 @@ EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_groups);
/**
* devm_hwmon_device_register_with_info - register w/ hwmon
- * @dev: the parent device
- * @name: hwmon name attribute
- * @drvdata: driver data to attach to created device
- * @info: Pointer to hwmon chip information
- * @groups - pointer to list of driver specific attribute groups
+ * @dev: the parent device
+ * @name: hwmon name attribute
+ * @drvdata: driver data to attach to created device
+ * @chip: pointer to hwmon chip information
+ * @groups: pointer to list of driver specific attribute groups
*
* Returns the pointer to the new device. The new device is automatically
* unregistered with the parent device.
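
hwmon_thermal_add_sensor() now inspects the ERR_PTR-encoded result of the thermal-zone registration: -ENODEV (CONFIG_THERMAL_OF compiled out) is tolerated, anything else is forwarded and unwinds the device. A toy standalone model of that filtering; the pointer encoding below is a simplification, not the kernel's:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    static void *err_ptr(long err)     { return (void *)(intptr_t)err; }
    static int is_err(const void *p)   { return (intptr_t)p < 0; }
    static long ptr_err(const void *p) { return (long)(intptr_t)p; }

    static int add_sensor(void *tzd)
    {
            if (is_err(tzd) && ptr_err(tzd) != -ENODEV)
                    return (int)ptr_err(tzd);  /* real failure */
            return 0;                          /* ok, or feature absent */
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   add_sensor(err_ptr(-ENODEV)),   /* 0   */
                   add_sensor(err_ptr(-EINVAL)),   /* -22 */
                   add_sensor((void *)0x1000));    /* 0   */
            return 0;
    }
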
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index f6a7667..5e5b32a 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -23,7 +23,8 @@
* @channels: filled with array of channels from iio
* @num_channels: number of channels in channels (saves counting twice)
* @hwmon_dev: associated hwmon device
- * @attr_group: the group of attributes
+ * @attr_group: the group of attributes
+ * @groups: null terminated array of attribute groups
* @attrs: null terminated array of attribute pointers.
*/
struct iio_hwmon_state {
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 62e38fa..e9e6aea 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -95,18 +95,20 @@ enum ina2xx_ids { ina219, ina226 };
struct ina2xx_config {
u16 config_default;
- int calibration_factor;
+ int calibration_value;
int registers;
int shunt_div;
int bus_voltage_shift;
int bus_voltage_lsb; /* uV */
- int power_lsb; /* uW */
+ int power_lsb_factor;
};
struct ina2xx_data {
const struct ina2xx_config *config;
long rshunt;
+ long current_lsb_uA;
+ long power_lsb_uW;
struct mutex config_lock;
struct regmap *regmap;
@@ -116,21 +118,21 @@ struct ina2xx_data {
static const struct ina2xx_config ina2xx_config[] = {
[ina219] = {
.config_default = INA219_CONFIG_DEFAULT,
- .calibration_factor = 40960000,
+ .calibration_value = 4096,
.registers = INA219_REGISTERS,
.shunt_div = 100,
.bus_voltage_shift = 3,
.bus_voltage_lsb = 4000,
- .power_lsb = 20000,
+ .power_lsb_factor = 20,
},
[ina226] = {
.config_default = INA226_CONFIG_DEFAULT,
- .calibration_factor = 5120000,
+ .calibration_value = 2048,
.registers = INA226_REGISTERS,
.shunt_div = 400,
.bus_voltage_shift = 0,
.bus_voltage_lsb = 1250,
- .power_lsb = 25000,
+ .power_lsb_factor = 25,
},
};
@@ -169,12 +171,16 @@ static u16 ina226_interval_to_reg(int interval)
return INA226_SHIFT_AVG(avg_bits);
}
+/*
+ * The calibration register is set to the best value, which eliminates
+ * truncation errors when the hardware calculates the current register.
+ * According to the datasheet (eq. 3) the best values are 2048 for the
+ * ina226 and 4096 for the ina219; they are hardcoded as calibration_value.
+ */
static int ina2xx_calibrate(struct ina2xx_data *data)
{
- u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
- data->rshunt);
-
- return regmap_write(data->regmap, INA2XX_CALIBRATION, val);
+ return regmap_write(data->regmap, INA2XX_CALIBRATION,
+ data->config->calibration_value);
}
/*
@@ -187,10 +193,6 @@ static int ina2xx_init(struct ina2xx_data *data)
if (ret < 0)
return ret;
- /*
- * Set current LSB to 1mA, shunt is in uOhms
- * (equation 13 in datasheet).
- */
return ina2xx_calibrate(data);
}
@@ -268,15 +270,15 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
val = DIV_ROUND_CLOSEST(val, 1000);
break;
case INA2XX_POWER:
- val = regval * data->config->power_lsb;
+ val = regval * data->power_lsb_uW;
break;
case INA2XX_CURRENT:
- /* signed register, LSB=1mA (selected), in mA */
- val = (s16)regval;
+ /* signed register, result in mA */
+ val = regval * data->current_lsb_uA;
+ val = DIV_ROUND_CLOSEST(val, 1000);
break;
case INA2XX_CALIBRATION:
- val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
- regval);
+ val = regval;
break;
default:
/* programmer goofed */
@@ -304,9 +306,32 @@ static ssize_t ina2xx_show_value(struct device *dev,
ina2xx_get_value(data, attr->index, regval));
}
-static ssize_t ina2xx_set_shunt(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+/*
+ * To keep the calibration register value fixed, the product of
+ * current_lsb and shunt_resistor must also stay fixed, equal to
+ * shunt_voltage_lsb = 1 / shunt_div (multiplied by 10^9 to keep
+ * the scale).
+ */
+static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
+{
+ unsigned int dividend = DIV_ROUND_CLOSEST(1000000000,
+ data->config->shunt_div);
+ if (val <= 0 || val > dividend)
+ return -EINVAL;
+
+ mutex_lock(&data->config_lock);
+ data->rshunt = val;
+ data->current_lsb_uA = DIV_ROUND_CLOSEST(dividend, val);
+ data->power_lsb_uW = data->config->power_lsb_factor *
+ data->current_lsb_uA;
+ mutex_unlock(&data->config_lock);
+
+ return 0;
+}
+
+static ssize_t ina2xx_store_shunt(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
{
unsigned long val;
int status;
@@ -316,18 +341,9 @@ static ssize_t ina2xx_set_shunt(struct device *dev,
if (status < 0)
return status;
- if (val == 0 ||
- /* Values greater than the calibration factor make no sense. */
- val > data->config->calibration_factor)
- return -EINVAL;
-
- mutex_lock(&data->config_lock);
- data->rshunt = val;
- status = ina2xx_calibrate(data);
- mutex_unlock(&data->config_lock);
+ status = ina2xx_set_shunt(data, val);
if (status < 0)
return status;
-
return count;
}
@@ -387,7 +403,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
/* shunt resistance */
static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
- ina2xx_show_value, ina2xx_set_shunt,
+ ina2xx_show_value, ina2xx_store_shunt,
INA2XX_CALIBRATION);
/* update interval (ina226 only) */
@@ -438,6 +454,7 @@ static int ina2xx_probe(struct i2c_client *client,
/* set the device type */
data->config = &ina2xx_config[chip];
+ mutex_init(&data->config_lock);
if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) {
struct ina2xx_platform_data *pdata = dev_get_platdata(dev);
@@ -448,10 +465,7 @@ static int ina2xx_probe(struct i2c_client *client,
val = INA2XX_RSHUNT_DEFAULT;
}
- if (val <= 0 || val > data->config->calibration_factor)
- return -ENODEV;
-
- data->rshunt = val;
+ ina2xx_set_shunt(data, val);
ina2xx_regmap_config.max_register = data->config->registers;
@@ -467,8 +481,6 @@ static int ina2xx_probe(struct i2c_client *client,
return -ENODEV;
}
- mutex_init(&data->config_lock);
-
data->groups[group++] = &ina2xx_group;
if (id->driver_data == ina226)
data->groups[group++] = &ina226_group;
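
With the calibration register pinned to the datasheet value, the shunt resistance now feeds the LSB derivation instead: current_lsb_uA = (10^9 / shunt_div) / rshunt, and power_lsb_uW = power_lsb_factor * current_lsb_uA. A worked standalone example for an ina226 with the default 10 mOhm (10000 uOhm) shunt:

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
            long shunt_div = 400;           /* ina226 */
            long power_lsb_factor = 25;     /* ina226 */
            long rshunt = 10000;            /* uOhm */

            long dividend = DIV_ROUND_CLOSEST(1000000000L, shunt_div); /* 2500000 */
            long current_lsb_uA = DIV_ROUND_CLOSEST(dividend, rshunt); /* 250 uA  */
            long power_lsb_uW = power_lsb_factor * current_lsb_uA;     /* 6250 uW */

            printf("current LSB %ld uA, power LSB %ld uW\n",
                   current_lsb_uA, power_lsb_uW);
            return 0;
    }
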
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 0721e17..06b4e1c 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -86,6 +86,7 @@ static const struct tctl_offset tctl_offset_table[] = {
{ 0x17, "AMD Ryzen 7 1800X", 20000 },
{ 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
{ 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
+ { 0x17, "AMD Ryzen Threadripper 1900X", 27000 },
{ 0x17, "AMD Ryzen Threadripper 1950", 10000 },
{ 0x17, "AMD Ryzen Threadripper 1920", 10000 },
{ 0x17, "AMD Ryzen Threadripper 1910", 10000 },
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 005ffb5..49f4b33 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -100,7 +100,7 @@ static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
switch (attr) {
case hwmon_chip_update_interval:
*val = data->sample_time;
- break;;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 0847900..6e4298e 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -39,6 +39,7 @@ config SENSORS_ADM1275
config SENSORS_IBM_CFFPS
tristate "IBM Common Form Factor Power Supply"
+ depends on LEDS_CLASS
help
If you say yes here you get hardware monitoring support for the IBM
Common Form Factor power supply.
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index cb56da6..93d9a9e 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -8,12 +8,29 @@
*/
#include <linux/bitops.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/fs.h>
#include <linux/i2c.h>
+#include <linux/jiffies.h>
+#include <linux/leds.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pmbus.h>
#include "pmbus.h"
+#define CFFPS_FRU_CMD 0x9A
+#define CFFPS_PN_CMD 0x9B
+#define CFFPS_SN_CMD 0x9E
+#define CFFPS_CCIN_CMD 0xBD
+#define CFFPS_FW_CMD_START 0xFA
+#define CFFPS_FW_NUM_BYTES 4
+#define CFFPS_SYS_CONFIG_CMD 0xDA
+
+#define CFFPS_INPUT_HISTORY_CMD 0xD6
+#define CFFPS_INPUT_HISTORY_SIZE 100
+
/* STATUS_MFR_SPECIFIC bits */
#define CFFPS_MFR_FAN_FAULT BIT(0)
#define CFFPS_MFR_THERMAL_FAULT BIT(1)
@@ -24,6 +41,153 @@
#define CFFPS_MFR_VAUX_FAULT BIT(6)
#define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7)
+#define CFFPS_LED_BLINK BIT(0)
+#define CFFPS_LED_ON BIT(1)
+#define CFFPS_LED_OFF BIT(2)
+#define CFFPS_BLINK_RATE_MS 250
+
+enum {
+ CFFPS_DEBUGFS_INPUT_HISTORY = 0,
+ CFFPS_DEBUGFS_FRU,
+ CFFPS_DEBUGFS_PN,
+ CFFPS_DEBUGFS_SN,
+ CFFPS_DEBUGFS_CCIN,
+ CFFPS_DEBUGFS_FW,
+ CFFPS_DEBUGFS_NUM_ENTRIES
+};
+
+struct ibm_cffps_input_history {
+ struct mutex update_lock;
+ unsigned long last_update;
+
+ u8 byte_count;
+ u8 data[CFFPS_INPUT_HISTORY_SIZE];
+};
+
+struct ibm_cffps {
+ struct i2c_client *client;
+
+ struct ibm_cffps_input_history input_history;
+
+ int debugfs_entries[CFFPS_DEBUGFS_NUM_ENTRIES];
+
+ char led_name[32];
+ u8 led_state;
+ struct led_classdev led;
+};
+
+#define to_psu(x, y) container_of((x), struct ibm_cffps, debugfs_entries[(y)])
+
+static ssize_t ibm_cffps_read_input_history(struct ibm_cffps *psu,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ int rc;
+ u8 msgbuf0[1] = { CFFPS_INPUT_HISTORY_CMD };
+ u8 msgbuf1[CFFPS_INPUT_HISTORY_SIZE + 1] = { 0 };
+ struct i2c_msg msg[2] = {
+ {
+ .addr = psu->client->addr,
+ .flags = psu->client->flags,
+ .len = 1,
+ .buf = msgbuf0,
+ }, {
+ .addr = psu->client->addr,
+ .flags = psu->client->flags | I2C_M_RD,
+ .len = CFFPS_INPUT_HISTORY_SIZE + 1,
+ .buf = msgbuf1,
+ },
+ };
+
+ if (!*ppos) {
+ mutex_lock(&psu->input_history.update_lock);
+ if (time_after(jiffies, psu->input_history.last_update + HZ)) {
+ /*
+ * Use a raw i2c transfer, since we need more bytes
+ * than Linux I2C supports through smbus xfr (only 32).
+ */
+ rc = i2c_transfer(psu->client->adapter, msg, 2);
+ if (rc < 0) {
+ mutex_unlock(&psu->input_history.update_lock);
+ return rc;
+ }
+
+ psu->input_history.byte_count = msgbuf1[0];
+ memcpy(psu->input_history.data, &msgbuf1[1],
+ CFFPS_INPUT_HISTORY_SIZE);
+ psu->input_history.last_update = jiffies;
+ }
+
+ mutex_unlock(&psu->input_history.update_lock);
+ }
+
+ return simple_read_from_buffer(buf, count, ppos,
+ psu->input_history.data,
+ psu->input_history.byte_count);
+}
+
+static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u8 cmd;
+ int i, rc;
+ int *idxp = file->private_data;
+ int idx = *idxp;
+ struct ibm_cffps *psu = to_psu(idxp, idx);
+ char data[I2C_SMBUS_BLOCK_MAX] = { 0 };
+
+ switch (idx) {
+ case CFFPS_DEBUGFS_INPUT_HISTORY:
+ return ibm_cffps_read_input_history(psu, buf, count, ppos);
+ case CFFPS_DEBUGFS_FRU:
+ cmd = CFFPS_FRU_CMD;
+ break;
+ case CFFPS_DEBUGFS_PN:
+ cmd = CFFPS_PN_CMD;
+ break;
+ case CFFPS_DEBUGFS_SN:
+ cmd = CFFPS_SN_CMD;
+ break;
+ case CFFPS_DEBUGFS_CCIN:
+ rc = i2c_smbus_read_word_swapped(psu->client, CFFPS_CCIN_CMD);
+ if (rc < 0)
+ return rc;
+
+ rc = snprintf(data, 5, "%04X", rc);
+ goto done;
+ case CFFPS_DEBUGFS_FW:
+ for (i = 0; i < CFFPS_FW_NUM_BYTES; ++i) {
+ rc = i2c_smbus_read_byte_data(psu->client,
+ CFFPS_FW_CMD_START + i);
+ if (rc < 0)
+ return rc;
+
+ snprintf(&data[i * 2], 3, "%02X", rc);
+ }
+
+ rc = i * 2;
+ goto done;
+ default:
+ return -EINVAL;
+ }
+
+ rc = i2c_smbus_read_block_data(psu->client, cmd, data);
+ if (rc < 0)
+ return rc;
+
+done:
+ data[rc] = '\n';
+ rc += 2;
+
+ return simple_read_from_buffer(buf, count, ppos, data, rc);
+}
+
+static const struct file_operations ibm_cffps_fops = {
+ .llseek = noop_llseek,
+ .read = ibm_cffps_debugfs_op,
+ .open = simple_open,
+};
+
static int ibm_cffps_read_byte_data(struct i2c_client *client, int page,
int reg)
{
@@ -105,6 +269,69 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page,
return rc;
}
+static void ibm_cffps_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ int rc;
+ struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led);
+
+ if (brightness == LED_OFF) {
+ psu->led_state = CFFPS_LED_OFF;
+ } else {
+ brightness = LED_FULL;
+ if (psu->led_state != CFFPS_LED_BLINK)
+ psu->led_state = CFFPS_LED_ON;
+ }
+
+ rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
+ psu->led_state);
+ if (rc < 0)
+ return;
+
+ led_cdev->brightness = brightness;
+}
+
+static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ int rc;
+ struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led);
+
+ psu->led_state = CFFPS_LED_BLINK;
+
+ if (led_cdev->brightness == LED_OFF)
+ return 0;
+
+ rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
+ CFFPS_LED_BLINK);
+ if (rc < 0)
+ return rc;
+
+ *delay_on = CFFPS_BLINK_RATE_MS;
+ *delay_off = CFFPS_BLINK_RATE_MS;
+
+ return 0;
+}
+
+static void ibm_cffps_create_led_class(struct ibm_cffps *psu)
+{
+ int rc;
+ struct i2c_client *client = psu->client;
+ struct device *dev = &client->dev;
+
+ snprintf(psu->led_name, sizeof(psu->led_name), "%s-%02x", client->name,
+ client->addr);
+ psu->led.name = psu->led_name;
+ psu->led.max_brightness = LED_FULL;
+ psu->led.brightness_set = ibm_cffps_led_brightness_set;
+ psu->led.blink_set = ibm_cffps_led_blink_set;
+
+ rc = devm_led_classdev_register(dev, &psu->led);
+ if (rc)
+ dev_warn(dev, "failed to register led class: %d\n", rc);
+}
+
static struct pmbus_driver_info ibm_cffps_info = {
.pages = 1,
.func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
@@ -116,10 +343,69 @@ static struct pmbus_driver_info ibm_cffps_info = {
.read_word_data = ibm_cffps_read_word_data,
};
+static struct pmbus_platform_data ibm_cffps_pdata = {
+ .flags = PMBUS_SKIP_STATUS_CHECK,
+};
+
static int ibm_cffps_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- return pmbus_do_probe(client, id, &ibm_cffps_info);
+ int i, rc;
+ struct dentry *debugfs;
+ struct dentry *ibm_cffps_dir;
+ struct ibm_cffps *psu;
+
+ client->dev.platform_data = &ibm_cffps_pdata;
+ rc = pmbus_do_probe(client, id, &ibm_cffps_info);
+ if (rc)
+ return rc;
+
+ /*
+	 * Don't fail the probe if there isn't enough memory for LEDs and
+	 * debugfs.
+ */
+ psu = devm_kzalloc(&client->dev, sizeof(*psu), GFP_KERNEL);
+ if (!psu)
+ return 0;
+
+ psu->client = client;
+ mutex_init(&psu->input_history.update_lock);
+ psu->input_history.last_update = jiffies - HZ;
+
+ ibm_cffps_create_led_class(psu);
+
+ /* Don't fail the probe if we can't create debugfs */
+ debugfs = pmbus_get_debugfs_dir(client);
+ if (!debugfs)
+ return 0;
+
+ ibm_cffps_dir = debugfs_create_dir(client->name, debugfs);
+ if (!ibm_cffps_dir)
+ return 0;
+
+ for (i = 0; i < CFFPS_DEBUGFS_NUM_ENTRIES; ++i)
+ psu->debugfs_entries[i] = i;
+
+ debugfs_create_file("input_history", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_INPUT_HISTORY],
+ &ibm_cffps_fops);
+ debugfs_create_file("fru", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_FRU],
+ &ibm_cffps_fops);
+ debugfs_create_file("part_number", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_PN],
+ &ibm_cffps_fops);
+ debugfs_create_file("serial_number", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_SN],
+ &ibm_cffps_fops);
+ debugfs_create_file("ccin", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_CCIN],
+ &ibm_cffps_fops);
+ debugfs_create_file("fw_version", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_FW],
+ &ibm_cffps_fops);
+
+ return 0;
}
static const struct i2c_device_id ibm_cffps_id[] = {
diff --git a/drivers/hwmon/pmbus/ir35221.c b/drivers/hwmon/pmbus/ir35221.c
index 8b906b4..977315b 100644
--- a/drivers/hwmon/pmbus/ir35221.c
+++ b/drivers/hwmon/pmbus/ir35221.c
@@ -25,168 +25,19 @@
#define IR35221_MFR_IOUT_VALLEY 0xcb
#define IR35221_MFR_TEMP_VALLEY 0xcc
-static long ir35221_reg2data(int data, enum pmbus_sensor_classes class)
-{
- s16 exponent;
- s32 mantissa;
- long val;
-
- /* We only modify LINEAR11 formats */
- exponent = ((s16)data) >> 11;
- mantissa = ((s16)((data & 0x7ff) << 5)) >> 5;
-
- val = mantissa * 1000L;
-
- /* scale result to micro-units for power sensors */
- if (class == PSC_POWER)
- val = val * 1000L;
-
- if (exponent >= 0)
- val <<= exponent;
- else
- val >>= -exponent;
-
- return val;
-}
-
-#define MAX_MANTISSA (1023 * 1000)
-#define MIN_MANTISSA (511 * 1000)
-
-static u16 ir35221_data2reg(long val, enum pmbus_sensor_classes class)
-{
- s16 exponent = 0, mantissa;
- bool negative = false;
-
- if (val == 0)
- return 0;
-
- if (val < 0) {
- negative = true;
- val = -val;
- }
-
- /* Power is in uW. Convert to mW before converting. */
- if (class == PSC_POWER)
- val = DIV_ROUND_CLOSEST(val, 1000L);
-
- /* Reduce large mantissa until it fits into 10 bit */
- while (val >= MAX_MANTISSA && exponent < 15) {
- exponent++;
- val >>= 1;
- }
- /* Increase small mantissa to improve precision */
- while (val < MIN_MANTISSA && exponent > -15) {
- exponent--;
- val <<= 1;
- }
-
- /* Convert mantissa from milli-units to units */
- mantissa = DIV_ROUND_CLOSEST(val, 1000);
-
- /* Ensure that resulting number is within range */
- if (mantissa > 0x3ff)
- mantissa = 0x3ff;
-
- /* restore sign */
- if (negative)
- mantissa = -mantissa;
-
- /* Convert to 5 bit exponent, 11 bit mantissa */
- return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800);
-}
-
-static u16 ir35221_scale_result(s16 data, int shift,
- enum pmbus_sensor_classes class)
-{
- long val;
-
- val = ir35221_reg2data(data, class);
-
- if (shift < 0)
- val >>= -shift;
- else
- val <<= shift;
-
- return ir35221_data2reg(val, class);
-}
-
static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
{
int ret;
switch (reg) {
- case PMBUS_IOUT_OC_FAULT_LIMIT:
- case PMBUS_IOUT_OC_WARN_LIMIT:
- ret = pmbus_read_word_data(client, page, reg);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, 1, PSC_CURRENT_OUT);
- break;
- case PMBUS_VIN_OV_FAULT_LIMIT:
- case PMBUS_VIN_OV_WARN_LIMIT:
- case PMBUS_VIN_UV_WARN_LIMIT:
- ret = pmbus_read_word_data(client, page, reg);
- ret = ir35221_scale_result(ret, -4, PSC_VOLTAGE_IN);
- break;
- case PMBUS_IIN_OC_WARN_LIMIT:
- ret = pmbus_read_word_data(client, page, reg);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_IN);
- break;
- case PMBUS_READ_VIN:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_VIN);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -5, PSC_VOLTAGE_IN);
- break;
- case PMBUS_READ_IIN:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_IIN);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -4, PSC_CURRENT_IN);
- else
- ret = ir35221_scale_result(ret, -5, PSC_CURRENT_IN);
- break;
- case PMBUS_READ_POUT:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_POUT);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -1, PSC_POWER);
- break;
- case PMBUS_READ_PIN:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_PIN);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -1, PSC_POWER);
- break;
- case PMBUS_READ_IOUT:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_IOUT);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_OUT);
- else
- ret = ir35221_scale_result(ret, -2, PSC_CURRENT_OUT);
- break;
case PMBUS_VIRT_READ_VIN_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_VIN_PEAK);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -5, PSC_VOLTAGE_IN);
break;
case PMBUS_VIRT_READ_VOUT_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_VOUT_PEAK);
break;
case PMBUS_VIRT_READ_IOUT_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_IOUT_PEAK);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_IN);
- else
- ret = ir35221_scale_result(ret, -2, PSC_CURRENT_IN);
break;
case PMBUS_VIRT_READ_TEMP_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_TEMP_PEAK);
@@ -194,9 +45,6 @@ static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
case PMBUS_VIRT_READ_VIN_MIN:
ret = pmbus_read_word_data(client, page,
IR35221_MFR_VIN_VALLEY);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -5, PSC_VOLTAGE_IN);
break;
case PMBUS_VIRT_READ_VOUT_MIN:
ret = pmbus_read_word_data(client, page,
@@ -205,12 +53,6 @@ static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
case PMBUS_VIRT_READ_IOUT_MIN:
ret = pmbus_read_word_data(client, page,
IR35221_MFR_IOUT_VALLEY);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_IN);
- else
- ret = ir35221_scale_result(ret, -2, PSC_CURRENT_IN);
break;
case PMBUS_VIRT_READ_TEMP_MIN:
ret = pmbus_read_word_data(client, page,
@@ -224,36 +66,6 @@ static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
return ret;
}
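
All of the bespoke ir35221_reg2data()/ir35221_data2reg()/ir35221_scale_result() plumbing removed above becomes redundant once the datasheet scaling is expressed through the PMBus core's direct-format coefficients. A minimal sketch of the idea, with placeholder m/b/R values rather than the real IR35221 coefficients:

	/*
	 * Hedged sketch: with format = direct the core itself computes
	 * X = (Y * 10^-R - b) / m in pmbus_reg2data_direct(), making the
	 * per-register ir35221_scale_result() shifts unnecessary. The
	 * m/b/R values here are placeholders, not datasheet values.
	 */
	info->format[PSC_VOLTAGE_IN] = direct;
	info->m[PSC_VOLTAGE_IN] = 1;		/* placeholder */
	info->b[PSC_VOLTAGE_IN] = 0;		/* placeholder */
	info->R[PSC_VOLTAGE_IN] = 2;		/* placeholder */
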
-static int ir35221_write_word_data(struct i2c_client *client, int page, int reg,
- u16 word)
-{
- int ret;
- u16 val;
-
- switch (reg) {
- case PMBUS_IOUT_OC_FAULT_LIMIT:
- case PMBUS_IOUT_OC_WARN_LIMIT:
- val = ir35221_scale_result(word, -1, PSC_CURRENT_OUT);
- ret = pmbus_write_word_data(client, page, reg, val);
- break;
- case PMBUS_VIN_OV_FAULT_LIMIT:
- case PMBUS_VIN_OV_WARN_LIMIT:
- case PMBUS_VIN_UV_WARN_LIMIT:
- val = ir35221_scale_result(word, 4, PSC_VOLTAGE_IN);
- ret = pmbus_write_word_data(client, page, reg, val);
- break;
- case PMBUS_IIN_OC_WARN_LIMIT:
- val = ir35221_scale_result(word, 1, PSC_CURRENT_IN);
- ret = pmbus_write_word_data(client, page, reg, val);
- break;
- default:
- ret = -ENODATA;
- break;
- }
-
- return ret;
-}
-
static int ir35221_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -292,7 +104,6 @@ static int ir35221_probe(struct i2c_client *client,
if (!info)
return -ENOMEM;
- info->write_word_data = ir35221_write_word_data;
info->read_word_data = ir35221_read_word_data;
info->pages = 2;
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 10d17fb..53db787 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -1,5 +1,5 @@
/*
- * Hardware monitoring driver for LM25056 / LM25063 / LM25066 / LM5064 / LM5066
+ * Hardware monitoring driver for LM25056 / LM25066 / LM5064 / LM5066
*
* Copyright (c) 2011 Ericsson AB.
* Copyright (c) 2013 Guenter Roeck
@@ -28,7 +28,7 @@
#include <linux/i2c.h>
#include "pmbus.h"
-enum chips { lm25056, lm25063, lm25066, lm5064, lm5066, lm5066i };
+enum chips { lm25056, lm25066, lm5064, lm5066, lm5066i };
#define LM25066_READ_VAUX 0xd0
#define LM25066_MFR_READ_IIN 0xd1
@@ -53,11 +53,6 @@ enum chips { lm25056, lm25063, lm25066, lm5064, lm5066, lm5066i };
#define LM25056_MFR_STS_VAUX_OV_WARN BIT(1)
#define LM25056_MFR_STS_VAUX_UV_WARN BIT(0)
-/* LM25063 only */
-
-#define LM25063_READ_VOUT_MAX 0xe5
-#define LM25063_READ_VOUT_MIN 0xe6
-
struct __coeff {
short m, b, R;
};
@@ -122,36 +117,6 @@ static struct __coeff lm25066_coeff[6][PSC_NUM_CLASSES + 2] = {
.m = 16,
},
},
- [lm25063] = {
- [PSC_VOLTAGE_IN] = {
- .m = 16000,
- .R = -2,
- },
- [PSC_VOLTAGE_OUT] = {
- .m = 16000,
- .R = -2,
- },
- [PSC_CURRENT_IN] = {
- .m = 10000,
- .R = -2,
- },
- [PSC_CURRENT_IN_L] = {
- .m = 10000,
- .R = -2,
- },
- [PSC_POWER] = {
- .m = 5000,
- .R = -3,
- },
- [PSC_POWER_L] = {
- .m = 5000,
- .R = -3,
- },
- [PSC_TEMPERATURE] = {
- .m = 15596,
- .R = -3,
- },
- },
[lm5064] = {
[PSC_VOLTAGE_IN] = {
.m = 4611,
@@ -272,10 +237,6 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
/* VIN: 6.14 mV VAUX: 293 uV LSB */
ret = DIV_ROUND_CLOSEST(ret * 293, 6140);
break;
- case lm25063:
- /* VIN: 6.25 mV VAUX: 200.0 uV LSB */
- ret = DIV_ROUND_CLOSEST(ret * 20, 625);
- break;
case lm25066:
/* VIN: 4.54 mV VAUX: 283.2 uV LSB */
ret = DIV_ROUND_CLOSEST(ret * 2832, 45400);
@@ -330,24 +291,6 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
return ret;
}
-static int lm25063_read_word_data(struct i2c_client *client, int page, int reg)
-{
- int ret;
-
- switch (reg) {
- case PMBUS_VIRT_READ_VOUT_MAX:
- ret = pmbus_read_word_data(client, 0, LM25063_READ_VOUT_MAX);
- break;
- case PMBUS_VIRT_READ_VOUT_MIN:
- ret = pmbus_read_word_data(client, 0, LM25063_READ_VOUT_MIN);
- break;
- default:
- ret = lm25066_read_word_data(client, page, reg);
- break;
- }
- return ret;
-}
-
static int lm25056_read_word_data(struct i2c_client *client, int page, int reg)
{
int ret;
@@ -502,11 +445,6 @@ static int lm25066_probe(struct i2c_client *client,
info->read_word_data = lm25056_read_word_data;
info->read_byte_data = lm25056_read_byte_data;
data->rlimit = 0x0fff;
- } else if (data->id == lm25063) {
- info->func[0] |= PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
- | PMBUS_HAVE_POUT;
- info->read_word_data = lm25063_read_word_data;
- data->rlimit = 0xffff;
} else {
info->func[0] |= PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
info->read_word_data = lm25066_read_word_data;
@@ -543,7 +481,6 @@ static int lm25066_probe(struct i2c_client *client,
static const struct i2c_device_id lm25066_id[] = {
{"lm25056", lm25056},
- {"lm25063", lm25063},
{"lm25066", lm25066},
{"lm5064", lm5064},
{"lm5066", lm5066},
diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c
index 9313849..c9dc879 100644
--- a/drivers/hwmon/pmbus/max31785.c
+++ b/drivers/hwmon/pmbus/max31785.c
@@ -16,12 +16,231 @@
enum max31785_regs {
MFR_REVISION = 0x9b,
+ MFR_FAN_CONFIG = 0xf1,
};
+#define MAX31785 0x3030
+#define MAX31785A 0x3040
+
+#define MFR_FAN_CONFIG_DUAL_TACH BIT(12)
+
#define MAX31785_NR_PAGES 23
+#define MAX31785_NR_FAN_PAGES 6
+
+static int max31785_read_byte_data(struct i2c_client *client, int page,
+ int reg)
+{
+ if (page < MAX31785_NR_PAGES)
+ return -ENODATA;
+
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ return -ENOTSUPP;
+ case PMBUS_FAN_CONFIG_12:
+ return pmbus_read_byte_data(client, page - MAX31785_NR_PAGES,
+ reg);
+ }
+
+ return -ENODATA;
+}
+
+static int max31785_write_byte(struct i2c_client *client, int page, u8 value)
+{
+ if (page < MAX31785_NR_PAGES)
+ return -ENODATA;
+
+ return -ENOTSUPP;
+}
+
+static int max31785_read_long_data(struct i2c_client *client, int page,
+ int reg, u32 *data)
+{
+ unsigned char cmdbuf[1];
+ unsigned char rspbuf[4];
+ int rc;
+
+ struct i2c_msg msg[2] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(cmdbuf),
+ .buf = cmdbuf,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(rspbuf),
+ .buf = rspbuf,
+ },
+ };
+
+ cmdbuf[0] = reg;
+
+ rc = pmbus_set_page(client, page);
+ if (rc < 0)
+ return rc;
+
+ rc = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (rc < 0)
+ return rc;
+
+ *data = (rspbuf[0] << (0 * 8)) | (rspbuf[1] << (1 * 8)) |
+ (rspbuf[2] << (2 * 8)) | (rspbuf[3] << (3 * 8));
+
+ return rc;
+}
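
The four response bytes are assembled little-endian by hand above; assuming <asm/unaligned.h> is acceptable in this context, an equivalent and arguably clearer formulation would be:

#include <asm/unaligned.h>

/* Equivalent little-endian assembly of the 4-byte response. */
*data = get_unaligned_le32(rspbuf);
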
+
+static int max31785_get_pwm(struct i2c_client *client, int page)
+{
+ int rv;
+
+ rv = pmbus_get_fan_rate_device(client, page, 0, percent);
+ if (rv < 0)
+ return rv;
+ else if (rv >= 0x8000)
+ return 0;
+ else if (rv >= 0x2711)
+ return 0x2710;
+
+ return rv;
+}
+
+static int max31785_get_pwm_mode(struct i2c_client *client, int page)
+{
+ int config;
+ int command;
+
+ config = pmbus_read_byte_data(client, page, PMBUS_FAN_CONFIG_12);
+ if (config < 0)
+ return config;
+
+ command = pmbus_read_word_data(client, page, PMBUS_FAN_COMMAND_1);
+ if (command < 0)
+ return command;
+
+ if (config & PB_FAN_1_RPM)
+ return (command >= 0x8000) ? 3 : 2;
+
+ if (command >= 0x8000)
+ return 3;
+ else if (command >= 0x2711)
+ return 0;
+
+ return 1;
+}
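
The values returned here follow the standard hwmon pwmX_enable convention. An illustrative enum (not part of the driver) spelling out the mapping implemented above:

/* Illustrative only: hwmon pwmX_enable values as returned above. */
enum max31785_pwm_mode {
	MAX31785_PWM_FULL_SPEED	= 0,	/* PWM mode, FAN_COMMAND >= 0x2711 */
	MAX31785_PWM_MANUAL	= 1,	/* PWM mode, FAN_COMMAND <= 0x2710 */
	MAX31785_PWM_RPM	= 2,	/* PB_FAN_1_RPM set, manual target */
	MAX31785_PWM_AUTO	= 3,	/* FAN_COMMAND >= 0x8000 */
};
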
+
+static int max31785_read_word_data(struct i2c_client *client, int page,
+ int reg)
+{
+ u32 val;
+ int rv;
+
+ switch (reg) {
+ case PMBUS_READ_FAN_SPEED_1:
+ if (page < MAX31785_NR_PAGES)
+ return -ENODATA;
+
+ rv = max31785_read_long_data(client, page - MAX31785_NR_PAGES,
+ reg, &val);
+ if (rv < 0)
+ return rv;
+
+ rv = (val >> 16) & 0xffff;
+ break;
+ case PMBUS_FAN_COMMAND_1:
+ /*
+ * PMBUS_FAN_COMMAND_x is probed to judge whether or not to
+ * expose fan control registers.
+ *
+ * Don't expose fan_target attribute for virtual pages.
+ */
+ rv = (page >= MAX31785_NR_PAGES) ? -ENOTSUPP : -ENODATA;
+ break;
+ case PMBUS_VIRT_PWM_1:
+ rv = max31785_get_pwm(client, page);
+ break;
+ case PMBUS_VIRT_PWM_ENABLE_1:
+ rv = max31785_get_pwm_mode(client, page);
+ break;
+ default:
+ rv = -ENODATA;
+ break;
+ }
+
+ return rv;
+}
+
+static inline u32 max31785_scale_pwm(u32 sensor_val)
+{
+ /*
+ * The datasheet describes the accepted value range for manual PWM as
+ * [0, 0x2710], while the hwmon pwmX sysfs interface accepts values in
+ * [0, 255]. The MAX31785 uses DIRECT mode to scale the FAN_COMMAND
+ * registers and in PWM mode the coefficients are m=1, b=0, R=2. The
+ * important observation here is that 0x2710 == 10000 == 100 * 100.
+ *
+	 * R=2 (== 10^2 == 100) scales the sysfs value up to the hardware
+	 * resolution, but that step is applied later, by pmbus_data2reg().
+	 * Here we only translate the hwmon pwmX range [0, 255] into the
+	 * percentage units required by PMBus: multiply by 100 to get a
+	 * percentage, then divide by 255 to rescale out of the sysfs range.
+ */
+ return (sensor_val * 100) / 255;
+}
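
A worked example of the round trip, as plain userspace C; the helper mirrors the function above:

#include <assert.h>

/* Mirror of max31785_scale_pwm() for a quick sanity check. */
static unsigned int scale_pwm(unsigned int sensor_val)
{
	return (sensor_val * 100) / 255;
}

int main(void)
{
	/* hwmon 255 -> 100 "percent"; R=2 in the core then gives 0x2710 */
	assert(scale_pwm(255) * 100 == 0x2710);
	assert(scale_pwm(0) == 0);
	return 0;
}
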
+
+static int max31785_pwm_enable(struct i2c_client *client, int page,
+ u16 word)
+{
+ int config = 0;
+ int rate;
+
+ switch (word) {
+ case 0:
+ rate = 0x7fff;
+ break;
+ case 1:
+ rate = pmbus_get_fan_rate_cached(client, page, 0, percent);
+ if (rate < 0)
+ return rate;
+ rate = max31785_scale_pwm(rate);
+ break;
+ case 2:
+ config = PB_FAN_1_RPM;
+ rate = pmbus_get_fan_rate_cached(client, page, 0, rpm);
+ if (rate < 0)
+ return rate;
+ break;
+ case 3:
+ rate = 0xffff;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return pmbus_update_fan(client, page, 0, config, PB_FAN_1_RPM, rate);
+}
+
+static int max31785_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ switch (reg) {
+ case PMBUS_VIRT_PWM_1:
+ return pmbus_update_fan(client, page, 0, 0, PB_FAN_1_RPM,
+ max31785_scale_pwm(word));
+ case PMBUS_VIRT_PWM_ENABLE_1:
+ return max31785_pwm_enable(client, page, word);
+ default:
+ break;
+ }
+
+ return -ENODATA;
+}
#define MAX31785_FAN_FUNCS \
- (PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12)
+ (PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_PWM12)
#define MAX31785_TEMP_FUNCS \
(PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP)
@@ -29,14 +248,26 @@ enum max31785_regs {
#define MAX31785_VOUT_FUNCS \
(PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT)
+#define MAX31785_NUM_FAN_PAGES 6
+
static const struct pmbus_driver_info max31785_info = {
.pages = MAX31785_NR_PAGES,
+ .write_word_data = max31785_write_word_data,
+ .read_byte_data = max31785_read_byte_data,
+ .read_word_data = max31785_read_word_data,
+ .write_byte = max31785_write_byte,
+
/* RPM */
.format[PSC_FAN] = direct,
.m[PSC_FAN] = 1,
.b[PSC_FAN] = 0,
.R[PSC_FAN] = 0,
+ /* PWM */
+ .format[PSC_PWM] = direct,
+ .m[PSC_PWM] = 1,
+ .b[PSC_PWM] = 0,
+ .R[PSC_PWM] = 2,
.func[0] = MAX31785_FAN_FUNCS,
.func[1] = MAX31785_FAN_FUNCS,
.func[2] = MAX31785_FAN_FUNCS,
@@ -72,13 +303,46 @@ static const struct pmbus_driver_info max31785_info = {
.func[22] = MAX31785_VOUT_FUNCS,
};
+static int max31785_configure_dual_tach(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < MAX31785_NR_FAN_PAGES; i++) {
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, i);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_FAN_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MFR_FAN_CONFIG_DUAL_TACH) {
+ int virtual = MAX31785_NR_PAGES + i;
+
+ info->pages = virtual + 1;
+ info->func[virtual] |= PMBUS_HAVE_FAN12;
+ info->func[virtual] |= PMBUS_PAGE_VIRTUAL;
+ }
+ }
+
+ return 0;
+}
+
static int max31785_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct pmbus_driver_info *info;
+ bool dual_tach = false;
s64 ret;
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
info = devm_kzalloc(dev, sizeof(struct pmbus_driver_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -89,6 +353,25 @@ static int max31785_probe(struct i2c_client *client,
if (ret < 0)
return ret;
+ ret = i2c_smbus_read_word_data(client, MFR_REVISION);
+ if (ret < 0)
+ return ret;
+
+ if (ret == MAX31785A) {
+ dual_tach = true;
+ } else if (ret == MAX31785) {
+ if (!strcmp("max31785a", id->name))
+			dev_warn(dev, "Expected max31785a, found max31785: cannot provide secondary tachometer readings\n");
+ } else {
+ return -ENODEV;
+ }
+
+ if (dual_tach) {
+ ret = max31785_configure_dual_tach(client, info);
+ if (ret < 0)
+ return ret;
+ }
+
return pmbus_do_probe(client, id, info);
}
@@ -100,9 +383,18 @@ static const struct i2c_device_id max31785_id[] = {
MODULE_DEVICE_TABLE(i2c, max31785_id);
+static const struct of_device_id max31785_of_match[] = {
+ { .compatible = "maxim,max31785" },
+ { .compatible = "maxim,max31785a" },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, max31785_of_match);
+
static struct i2c_driver max31785_driver = {
.driver = {
.name = "max31785",
+ .of_match_table = max31785_of_match,
},
.probe = max31785_probe,
.remove = pmbus_do_remove,
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index fa613bd..1d24397 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -190,6 +190,33 @@ enum pmbus_regs {
PMBUS_VIRT_VMON_UV_FAULT_LIMIT,
PMBUS_VIRT_VMON_OV_FAULT_LIMIT,
PMBUS_VIRT_STATUS_VMON,
+
+ /*
+ * RPM and PWM Fan control
+ *
+ * Drivers wanting to expose PWM control must define the behaviour of
+ * PMBUS_VIRT_PWM_[1-4] and PMBUS_VIRT_PWM_ENABLE_[1-4] in the
+ * {read,write}_word_data callback.
+ *
+ * pmbus core provides a default implementation for
+ * PMBUS_VIRT_FAN_TARGET_[1-4].
+ *
+ * TARGET, PWM and PWM_ENABLE members must be defined sequentially;
+ * pmbus core uses the difference between the provided register and
+	 * its _1 counterpart to calculate the FAN/PWM ID.
+ */
+ PMBUS_VIRT_FAN_TARGET_1,
+ PMBUS_VIRT_FAN_TARGET_2,
+ PMBUS_VIRT_FAN_TARGET_3,
+ PMBUS_VIRT_FAN_TARGET_4,
+ PMBUS_VIRT_PWM_1,
+ PMBUS_VIRT_PWM_2,
+ PMBUS_VIRT_PWM_3,
+ PMBUS_VIRT_PWM_4,
+ PMBUS_VIRT_PWM_ENABLE_1,
+ PMBUS_VIRT_PWM_ENABLE_2,
+ PMBUS_VIRT_PWM_ENABLE_3,
+ PMBUS_VIRT_PWM_ENABLE_4,
};
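
A sketch of the ID calculation the comment above relies on; this is the pattern the core uses when decoding these sequential virtual registers:

/* Sketch: fan/PWM ID recovered from the sequential register layout. */
int id = reg - PMBUS_VIRT_FAN_TARGET_1;	/* 0..3 for fans 1..4 */
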
/*
@@ -223,6 +250,8 @@ enum pmbus_regs {
#define PB_FAN_1_RPM BIT(6)
#define PB_FAN_1_INSTALLED BIT(7)
+enum pmbus_fan_mode { percent = 0, rpm };
+
/*
* STATUS_BYTE, STATUS_WORD (lower)
*/
@@ -313,6 +342,7 @@ enum pmbus_sensor_classes {
PSC_POWER,
PSC_TEMPERATURE,
PSC_FAN,
+ PSC_PWM,
PSC_NUM_CLASSES /* Number of power sensor classes */
};
@@ -339,6 +369,10 @@ enum pmbus_sensor_classes {
#define PMBUS_HAVE_STATUS_FAN34 BIT(17)
#define PMBUS_HAVE_VMON BIT(18)
#define PMBUS_HAVE_STATUS_VMON BIT(19)
+#define PMBUS_HAVE_PWM12 BIT(20)
+#define PMBUS_HAVE_PWM34 BIT(21)
+
+#define PMBUS_PAGE_VIRTUAL BIT(31)
enum pmbus_data_format { linear = 0, direct, vid };
enum vrm_version { vr11 = 0, vr12, vr13 };
@@ -421,5 +455,12 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
int pmbus_do_remove(struct i2c_client *client);
const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client
*client);
+int pmbus_get_fan_rate_device(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode);
+int pmbus_get_fan_rate_cached(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode);
+int pmbus_update_fan(struct i2c_client *client, int page, int id,
+ u8 config, u8 mask, u16 command);
+struct dentry *pmbus_get_debugfs_dir(struct i2c_client *client);
#endif /* PMBUS_H */
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index a139940..f7c47d7 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -65,6 +65,7 @@ struct pmbus_sensor {
u16 reg; /* register */
enum pmbus_sensor_classes class; /* sensor class */
bool update; /* runtime sensor update needed */
+ bool convert; /* Whether or not to apply linear/vid/direct */
int data; /* Sensor data.
Negative if there was a read error */
};
@@ -129,6 +130,27 @@ struct pmbus_debugfs_entry {
u8 reg;
};
+static const int pmbus_fan_rpm_mask[] = {
+ PB_FAN_1_RPM,
+ PB_FAN_2_RPM,
+ PB_FAN_1_RPM,
+ PB_FAN_2_RPM,
+};
+
+static const int pmbus_fan_config_registers[] = {
+ PMBUS_FAN_CONFIG_12,
+ PMBUS_FAN_CONFIG_12,
+ PMBUS_FAN_CONFIG_34,
+ PMBUS_FAN_CONFIG_34
+};
+
+static const int pmbus_fan_command_registers[] = {
+ PMBUS_FAN_COMMAND_1,
+ PMBUS_FAN_COMMAND_2,
+ PMBUS_FAN_COMMAND_3,
+ PMBUS_FAN_COMMAND_4,
+};
+
void pmbus_clear_cache(struct i2c_client *client)
{
struct pmbus_data *data = i2c_get_clientdata(client);
@@ -140,18 +162,27 @@ EXPORT_SYMBOL_GPL(pmbus_clear_cache);
int pmbus_set_page(struct i2c_client *client, int page)
{
struct pmbus_data *data = i2c_get_clientdata(client);
- int rv = 0;
- int newpage;
+ int rv;
- if (page >= 0 && page != data->currpage) {
+ if (page < 0 || page == data->currpage)
+ return 0;
+
+ if (!(data->info->func[page] & PMBUS_PAGE_VIRTUAL)) {
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
- newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
- if (newpage != page)
- rv = -EIO;
- else
- data->currpage = page;
+ if (rv < 0)
+ return rv;
+
+ rv = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
+ if (rv < 0)
+ return rv;
+
+ if (rv != page)
+ return -EIO;
}
- return rv;
+
+ data->currpage = page;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(pmbus_set_page);
@@ -198,6 +229,28 @@ int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
}
EXPORT_SYMBOL_GPL(pmbus_write_word_data);
+
+static int pmbus_write_virt_reg(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ int bit;
+ int id;
+ int rv;
+
+ switch (reg) {
+ case PMBUS_VIRT_FAN_TARGET_1 ... PMBUS_VIRT_FAN_TARGET_4:
+ id = reg - PMBUS_VIRT_FAN_TARGET_1;
+ bit = pmbus_fan_rpm_mask[id];
+ rv = pmbus_update_fan(client, page, id, bit, bit, word);
+ break;
+ default:
+ rv = -ENXIO;
+ break;
+ }
+
+ return rv;
+}
+
/*
* _pmbus_write_word_data() is similar to pmbus_write_word_data(), but checks if
* a device specific mapping function exists and calls it if necessary.
@@ -214,11 +267,38 @@ static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg,
if (status != -ENODATA)
return status;
}
+
if (reg >= PMBUS_VIRT_BASE)
- return -ENXIO;
+ return pmbus_write_virt_reg(client, page, reg, word);
+
return pmbus_write_word_data(client, page, reg, word);
}
+int pmbus_update_fan(struct i2c_client *client, int page, int id,
+ u8 config, u8 mask, u16 command)
+{
+ int from;
+ int rv;
+ u8 to;
+
+ from = pmbus_read_byte_data(client, page,
+ pmbus_fan_config_registers[id]);
+ if (from < 0)
+ return from;
+
+ to = (from & ~mask) | (config & mask);
+ if (to != from) {
+ rv = pmbus_write_byte_data(client, page,
+ pmbus_fan_config_registers[id], to);
+ if (rv < 0)
+ return rv;
+ }
+
+ return _pmbus_write_word_data(client, page,
+ pmbus_fan_command_registers[id], command);
+}
+EXPORT_SYMBOL_GPL(pmbus_update_fan);
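
A hedged usage sketch: switch fan 0 on a given page into RPM mode with an 1800 RPM target. Since config and mask both name PB_FAN_1_RPM, only that bit of FAN_CONFIG_12 is rewritten before the command word is set:

	/* Sketch only: the target value is illustrative, not from a datasheet. */
	ret = pmbus_update_fan(client, page, 0, PB_FAN_1_RPM, PB_FAN_1_RPM,
			       1800);
	if (ret < 0)
		return ret;
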
+
int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg)
{
int rv;
@@ -231,6 +311,24 @@ int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg)
}
EXPORT_SYMBOL_GPL(pmbus_read_word_data);
+static int pmbus_read_virt_reg(struct i2c_client *client, int page, int reg)
+{
+ int rv;
+ int id;
+
+ switch (reg) {
+ case PMBUS_VIRT_FAN_TARGET_1 ... PMBUS_VIRT_FAN_TARGET_4:
+ id = reg - PMBUS_VIRT_FAN_TARGET_1;
+ rv = pmbus_get_fan_rate_device(client, page, id, rpm);
+ break;
+ default:
+ rv = -ENXIO;
+ break;
+ }
+
+ return rv;
+}
+
/*
* _pmbus_read_word_data() is similar to pmbus_read_word_data(), but checks if
* a device specific mapping function exists and calls it if necessary.
@@ -246,8 +344,10 @@ static int _pmbus_read_word_data(struct i2c_client *client, int page, int reg)
if (status != -ENODATA)
return status;
}
+
if (reg >= PMBUS_VIRT_BASE)
- return -ENXIO;
+ return pmbus_read_virt_reg(client, page, reg);
+
return pmbus_read_word_data(client, page, reg);
}
@@ -312,6 +412,68 @@ static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg)
return pmbus_read_byte_data(client, page, reg);
}
+static struct pmbus_sensor *pmbus_find_sensor(struct pmbus_data *data, int page,
+ int reg)
+{
+ struct pmbus_sensor *sensor;
+
+ for (sensor = data->sensors; sensor; sensor = sensor->next) {
+ if (sensor->page == page && sensor->reg == reg)
+ return sensor;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int pmbus_get_fan_rate(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode,
+ bool from_cache)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ bool want_rpm, have_rpm;
+ struct pmbus_sensor *s;
+ int config;
+ int reg;
+
+ want_rpm = (mode == rpm);
+
+ if (from_cache) {
+ reg = want_rpm ? PMBUS_VIRT_FAN_TARGET_1 : PMBUS_VIRT_PWM_1;
+ s = pmbus_find_sensor(data, page, reg + id);
+ if (IS_ERR(s))
+ return PTR_ERR(s);
+
+ return s->data;
+ }
+
+ config = pmbus_read_byte_data(client, page,
+ pmbus_fan_config_registers[id]);
+ if (config < 0)
+ return config;
+
+ have_rpm = !!(config & pmbus_fan_rpm_mask[id]);
+ if (want_rpm == have_rpm)
+ return pmbus_read_word_data(client, page,
+ pmbus_fan_command_registers[id]);
+
+ /* Can't sensibly map between RPM and PWM, just return zero */
+ return 0;
+}
+
+int pmbus_get_fan_rate_device(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode)
+{
+ return pmbus_get_fan_rate(client, page, id, mode, false);
+}
+EXPORT_SYMBOL_GPL(pmbus_get_fan_rate_device);
+
+int pmbus_get_fan_rate_cached(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode)
+{
+ return pmbus_get_fan_rate(client, page, id, mode, true);
+}
+EXPORT_SYMBOL_GPL(pmbus_get_fan_rate_cached);
+
static void pmbus_clear_fault_page(struct i2c_client *client, int page)
{
_pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
@@ -513,7 +675,7 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
/* X = 1/m * (Y * 10^-R - b) */
R = -R;
/* scale result to milli-units for everything but fans */
- if (sensor->class != PSC_FAN) {
+ if (!(sensor->class == PSC_FAN || sensor->class == PSC_PWM)) {
R += 3;
b *= 1000;
}
@@ -568,6 +730,9 @@ static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
{
long val;
+ if (!sensor->convert)
+ return sensor->data;
+
switch (data->info->format[sensor->class]) {
case direct:
val = pmbus_reg2data_direct(data, sensor);
@@ -672,7 +837,7 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
}
/* Calculate Y = (m * X + b) * 10^R */
- if (sensor->class != PSC_FAN) {
+ if (!(sensor->class == PSC_FAN || sensor->class == PSC_PWM)) {
R -= 3; /* Adjust R and b for data in milli-units */
b *= 1000;
}
@@ -703,6 +868,9 @@ static u16 pmbus_data2reg(struct pmbus_data *data,
{
u16 regval;
+ if (!sensor->convert)
+ return val;
+
switch (data->info->format[sensor->class]) {
case direct:
regval = pmbus_data2reg_direct(data, sensor, val);
@@ -915,7 +1083,8 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
const char *name, const char *type,
int seq, int page, int reg,
enum pmbus_sensor_classes class,
- bool update, bool readonly)
+ bool update, bool readonly,
+ bool convert)
{
struct pmbus_sensor *sensor;
struct device_attribute *a;
@@ -925,12 +1094,18 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
return NULL;
a = &sensor->attribute;
- snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s",
- name, seq, type);
+ if (type)
+ snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s",
+ name, seq, type);
+ else
+ snprintf(sensor->name, sizeof(sensor->name), "%s%d",
+ name, seq);
+
sensor->page = page;
sensor->reg = reg;
sensor->class = class;
sensor->update = update;
+ sensor->convert = convert;
pmbus_dev_attr_init(a, sensor->name,
readonly ? S_IRUGO : S_IRUGO | S_IWUSR,
pmbus_show_sensor, pmbus_set_sensor);
@@ -1029,7 +1204,7 @@ static int pmbus_add_limit_attrs(struct i2c_client *client,
curr = pmbus_add_sensor(data, name, l->attr, index,
page, l->reg, attr->class,
attr->update || l->update,
- false);
+ false, true);
if (!curr)
return -ENOMEM;
if (l->sbit && (info->func[page] & attr->sfunc)) {
@@ -1068,7 +1243,7 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
return ret;
}
base = pmbus_add_sensor(data, name, "input", index, page, attr->reg,
- attr->class, true, true);
+ attr->class, true, true, true);
if (!base)
return -ENOMEM;
if (attr->sfunc) {
@@ -1592,13 +1767,6 @@ static const int pmbus_fan_registers[] = {
PMBUS_READ_FAN_SPEED_4
};
-static const int pmbus_fan_config_registers[] = {
- PMBUS_FAN_CONFIG_12,
- PMBUS_FAN_CONFIG_12,
- PMBUS_FAN_CONFIG_34,
- PMBUS_FAN_CONFIG_34
-};
-
static const int pmbus_fan_status_registers[] = {
PMBUS_STATUS_FAN_12,
PMBUS_STATUS_FAN_12,
@@ -1621,6 +1789,42 @@ static const u32 pmbus_fan_status_flags[] = {
};
/* Fans */
+
+/* Precondition: FAN_CONFIG_x_y and FAN_COMMAND_x must exist for the fan ID */
+static int pmbus_add_fan_ctrl(struct i2c_client *client,
+ struct pmbus_data *data, int index, int page, int id,
+ u8 config)
+{
+ struct pmbus_sensor *sensor;
+
+ sensor = pmbus_add_sensor(data, "fan", "target", index, page,
+ PMBUS_VIRT_FAN_TARGET_1 + id, PSC_FAN,
+ false, false, true);
+
+ if (!sensor)
+ return -ENOMEM;
+
+ if (!((data->info->func[page] & PMBUS_HAVE_PWM12) ||
+ (data->info->func[page] & PMBUS_HAVE_PWM34)))
+ return 0;
+
+ sensor = pmbus_add_sensor(data, "pwm", NULL, index, page,
+ PMBUS_VIRT_PWM_1 + id, PSC_PWM,
+ false, false, true);
+
+ if (!sensor)
+ return -ENOMEM;
+
+ sensor = pmbus_add_sensor(data, "pwm", "enable", index, page,
+ PMBUS_VIRT_PWM_ENABLE_1 + id, PSC_PWM,
+ true, false, false);
+
+ if (!sensor)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int pmbus_add_fan_attributes(struct i2c_client *client,
struct pmbus_data *data)
{
@@ -1655,9 +1859,18 @@ static int pmbus_add_fan_attributes(struct i2c_client *client,
if (pmbus_add_sensor(data, "fan", "input", index,
page, pmbus_fan_registers[f],
- PSC_FAN, true, true) == NULL)
+ PSC_FAN, true, true, true) == NULL)
return -ENOMEM;
+ /* Fan control */
+ if (pmbus_check_word_register(client, page,
+ pmbus_fan_command_registers[f])) {
+ ret = pmbus_add_fan_ctrl(client, data, index,
+ page, f, regval);
+ if (ret < 0)
+ return ret;
+ }
+
/*
* Each fan status register covers multiple fans,
* so we have to do some magic.
@@ -2168,6 +2381,14 @@ int pmbus_do_remove(struct i2c_client *client)
}
EXPORT_SYMBOL_GPL(pmbus_do_remove);
+struct dentry *pmbus_get_debugfs_dir(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ return data->debugfs;
+}
+EXPORT_SYMBOL_GPL(pmbus_get_debugfs_dir);
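
A hedged sketch of how a client driver might use this export to add its own file under the core's per-device debugfs directory; mydrv_fops is hypothetical:

	struct dentry *dir = pmbus_get_debugfs_dir(client);

	/* Hypothetical extra file next to the core's debugfs entries. */
	if (dir)
		debugfs_create_file("mfr_status", 0444, dir, client,
				    &mydrv_fops);
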
+
static int __init pmbus_core_init(void)
{
pmbus_debugfs_dir = debugfs_create_dir("pmbus", NULL);
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 25d2834..2be7775 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -179,6 +179,7 @@ struct sht15_data {
* sht15_crc8() - compute crc8
* @data: sht15 specific data.
* @value: sht15 retrieved data.
+ * @len: Length of retrieved data
*
* This implements section 2 of the CRC datasheet.
*/
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
index 06706d2..190e7b3 100644
--- a/drivers/hwmon/sht21.c
+++ b/drivers/hwmon/sht21.c
@@ -41,7 +41,7 @@
/**
* struct sht21 - SHT21 device specific data
- * @hwmon_dev: device registered with hwmon
+ * @client: I2C client device
* @lock: mutex to protect measurement values
* @last_update: time of last update (jiffies)
* @temperature: cached temperature measurement value
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
index 6ea99cd..370b57d 100644
--- a/drivers/hwmon/sht3x.c
+++ b/drivers/hwmon/sht3x.c
@@ -732,6 +732,13 @@ static int sht3x_probe(struct i2c_client *client,
mutex_init(&data->i2c_lock);
mutex_init(&data->data_lock);
+ /*
+ * An attempt to read limits register too early
+ * causes a NACK response from the chip.
+ * Waiting for an empirical delay of 500 us solves the issue.
+ */
+ usleep_range(500, 600);
+
ret = limits_update(data);
if (ret)
return ret;
diff --git a/drivers/hwmon/w83773g.c b/drivers/hwmon/w83773g.c
new file mode 100644
index 0000000..e858093
--- /dev/null
+++ b/drivers/hwmon/w83773g.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (C) 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Driver for the Nuvoton W83773G SMBus temperature sensor IC.
+ * Supported models: W83773G
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+/* W83773 has 3 channels */
+#define W83773_CHANNELS 3
+
+/* The W83773 registers */
+#define W83773_CONVERSION_RATE_REG_READ 0x04
+#define W83773_CONVERSION_RATE_REG_WRITE 0x0A
+#define W83773_MANUFACTURER_ID_REG 0xFE
+#define W83773_LOCAL_TEMP 0x00
+
+static const u8 W83773_STATUS[2] = { 0x02, 0x17 };
+
+static const u8 W83773_TEMP_LSB[2] = { 0x10, 0x25 };
+static const u8 W83773_TEMP_MSB[2] = { 0x01, 0x24 };
+
+static const u8 W83773_OFFSET_LSB[2] = { 0x12, 0x16 };
+static const u8 W83773_OFFSET_MSB[2] = { 0x11, 0x15 };
+
+static const struct i2c_device_id w83773_id[] = {
+ { "w83773g" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, w83773_id);
+
+static const struct of_device_id w83773_of_match[] = {
+ {
+ .compatible = "nuvoton,w83773g"
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, w83773_of_match);
+
+static inline long temp_of_local(s8 reg)
+{
+ return reg * 1000;
+}
+
+static inline long temp_of_remote(s8 hb, u8 lb)
+{
+ return (hb << 3 | lb >> 5) * 125;
+}
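
The remote reading is an 11-bit signed value split across two registers (the LSB holds its three significant bits in the top of the byte) with a 0.125 degC step. A runnable userspace check of the decoding:

#include <assert.h>

/* Mirror of temp_of_remote() above. */
static long temp_of_remote(signed char hb, unsigned char lb)
{
	return (hb << 3 | lb >> 5) * 125;
}

int main(void)
{
	/* MSB 0x19, LSB 0x20 -> (200 | 1) * 125 = 25125 mdegC (25.125 C) */
	assert(temp_of_remote(0x19, 0x20) == 25125);
	return 0;
}
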
+
+static int get_local_temp(struct regmap *regmap, long *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_LOCAL_TEMP, &regval);
+ if (ret < 0)
+ return ret;
+
+ *val = temp_of_local(regval);
+ return 0;
+}
+
+static int get_remote_temp(struct regmap *regmap, int index, long *val)
+{
+ unsigned int regval_high;
+ unsigned int regval_low;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_TEMP_MSB[index], &regval_high);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, W83773_TEMP_LSB[index], &regval_low);
+ if (ret < 0)
+ return ret;
+
+ *val = temp_of_remote(regval_high, regval_low);
+ return 0;
+}
+
+static int get_fault(struct regmap *regmap, int index, long *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_STATUS[index], &regval);
+ if (ret < 0)
+ return ret;
+
+ *val = (regval & 0x04) >> 2;
+ return 0;
+}
+
+static int get_offset(struct regmap *regmap, int index, long *val)
+{
+ unsigned int regval_high;
+ unsigned int regval_low;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_OFFSET_MSB[index], &regval_high);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, W83773_OFFSET_LSB[index], &regval_low);
+ if (ret < 0)
+ return ret;
+
+ *val = temp_of_remote(regval_high, regval_low);
+ return 0;
+}
+
+static int set_offset(struct regmap *regmap, int index, long val)
+{
+ int ret;
+ u8 high_byte;
+ u8 low_byte;
+
+ val = clamp_val(val, -127825, 127825);
+	/* offset value equals (high_byte << 3 | low_byte >> 5) * 125 */
+ val /= 125;
+ high_byte = val >> 3;
+ low_byte = (val & 0x07) << 5;
+
+ ret = regmap_write(regmap, W83773_OFFSET_MSB[index], high_byte);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write(regmap, W83773_OFFSET_LSB[index], low_byte);
+}
+
+static int get_update_interval(struct regmap *regmap, long *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_CONVERSION_RATE_REG_READ, &regval);
+ if (ret < 0)
+ return ret;
+
+ *val = 16000 >> regval;
+ return 0;
+}
+
+static int set_update_interval(struct regmap *regmap, long val)
+{
+ int rate;
+
+ /*
+ * For valid rates, interval can be calculated as
+ * interval = (1 << (8 - rate)) * 62.5;
+ * Rounded rate is therefore
+ * rate = 8 - __fls(interval * 8 / (62.5 * 7));
+ * Use clamp_val() to avoid overflows, and to ensure valid input
+ * for __fls.
+ */
+ val = clamp_val(val, 62, 16000) * 10;
+ rate = 8 - __fls((val * 8 / (625 * 7)));
+ return regmap_write(regmap, W83773_CONVERSION_RATE_REG_WRITE, rate);
+}
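
A runnable userspace check of the rounding above: requesting 500 ms yields rate 5, and (1 << (8 - 5)) * 62.5 = 500 ms exactly. The local helper stands in for the kernel's __fls():

#include <assert.h>

/* Stand-in for the kernel's __fls(): index of the highest set bit. */
static int fls_(unsigned long x)
{
	int i = -1;

	while (x) {
		x >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	long val = 500 * 10;	/* clamp_val(500, 62, 16000) * 10 */

	assert(8 - fls_(val * 8 / (625 * 7)) == 5);
	return 0;
}
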
+
+static int w83773_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+
+ if (type == hwmon_chip) {
+ if (attr == hwmon_chip_update_interval)
+ return get_update_interval(regmap, val);
+ return -EOPNOTSUPP;
+ }
+
+ switch (attr) {
+ case hwmon_temp_input:
+ if (channel == 0)
+ return get_local_temp(regmap, val);
+ return get_remote_temp(regmap, channel - 1, val);
+ case hwmon_temp_fault:
+ return get_fault(regmap, channel - 1, val);
+ case hwmon_temp_offset:
+ return get_offset(regmap, channel - 1, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int w83773_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+
+ if (type == hwmon_chip && attr == hwmon_chip_update_interval)
+ return set_update_interval(regmap, val);
+
+ if (type == hwmon_temp && attr == hwmon_temp_offset)
+ return set_offset(regmap, channel - 1, val);
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t w83773_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return 0644;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_fault:
+ return 0444;
+ case hwmon_temp_offset:
+ return 0644;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const u32 w83773_chip_config[] = {
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
+ 0
+};
+
+static const struct hwmon_channel_info w83773_chip = {
+ .type = hwmon_chip,
+ .config = w83773_chip_config,
+};
+
+static const u32 w83773_temp_config[] = {
+ HWMON_T_INPUT,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
+ 0
+};
+
+static const struct hwmon_channel_info w83773_temp = {
+ .type = hwmon_temp,
+ .config = w83773_temp_config,
+};
+
+static const struct hwmon_channel_info *w83773_info[] = {
+ &w83773_chip,
+ &w83773_temp,
+ NULL
+};
+
+static const struct hwmon_ops w83773_ops = {
+ .is_visible = w83773_is_visible,
+ .read = w83773_read,
+ .write = w83773_write,
+};
+
+static const struct hwmon_chip_info w83773_chip_info = {
+ .ops = &w83773_ops,
+ .info = w83773_info,
+};
+
+static const struct regmap_config w83773_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int w83773_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct regmap *regmap;
+ int ret;
+
+ regmap = devm_regmap_init_i2c(client, &w83773_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(regmap);
+ }
+
+ /* Set the conversion rate to 2 Hz */
+ ret = regmap_write(regmap, W83773_CONVERSION_RATE_REG_WRITE, 0x05);
+ if (ret < 0) {
+ dev_err(&client->dev, "error writing config rate register\n");
+ return ret;
+ }
+
+ i2c_set_clientdata(client, regmap);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev,
+ client->name,
+ regmap,
+ &w83773_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct i2c_driver w83773_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "w83773g",
+ .of_match_table = of_match_ptr(w83773_of_match),
+ },
+ .probe = w83773_probe,
+ .id_table = w83773_id,
+};
+
+module_i2c_driver(w83773_driver);
+
+MODULE_AUTHOR("Lei YU <mine260309@gmail.com>");
+MODULE_DESCRIPTION("W83773G temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwtracing/stm/ftrace.c b/drivers/hwtracing/stm/ftrace.c
index bd126a7..7da7564 100644
--- a/drivers/hwtracing/stm/ftrace.c
+++ b/drivers/hwtracing/stm/ftrace.c
@@ -42,9 +42,11 @@ static struct stm_ftrace {
* @len: length of the data packet
*/
static void notrace
-stm_ftrace_write(const void *buf, unsigned int len)
+stm_ftrace_write(struct trace_export *export, const void *buf, unsigned int len)
{
- stm_source_write(&stm_ftrace.data, STM_FTRACE_CHAN, buf, len);
+ struct stm_ftrace *stm = container_of(export, struct stm_ftrace, ftrace);
+
+ stm_source_write(&stm->data, STM_FTRACE_CHAN, buf, len);
}
static int stm_ftrace_link(struct stm_source_data *data)
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 0d05dad..44cffad 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -379,7 +379,7 @@ static int cht_wc_i2c_adap_i2c_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_device_id cht_wc_i2c_adap_id_table[] = {
+static const struct platform_device_id cht_wc_i2c_adap_id_table[] = {
{ .name = "cht_wcove_ext_chgr" },
{},
};
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 21bf619..9fee4c0 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -280,8 +280,6 @@ struct dw_i2c_dev {
int (*acquire_lock)(struct dw_i2c_dev *dev);
void (*release_lock)(struct dw_i2c_dev *dev);
bool pm_disabled;
- bool suspended;
- bool skip_resume;
void (*disable)(struct dw_i2c_dev *dev);
void (*disable_int)(struct dw_i2c_dev *dev);
int (*init)(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 58add69..153b947 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -42,6 +42,7 @@
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/suspend.h>
#include "i2c-designware-core.h"
@@ -372,6 +373,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
adap->dev.of_node = pdev->dev.of_node;
+ dev_pm_set_driver_flags(&pdev->dev,
+ DPM_FLAG_SMART_PREPARE |
+ DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_LEAVE_SUSPENDED);
+
/* The code below assumes runtime PM to be disabled. */
WARN_ON(pm_runtime_enabled(&pdev->dev));
@@ -435,12 +441,24 @@ MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
#ifdef CONFIG_PM_SLEEP
static int dw_i2c_plat_prepare(struct device *dev)
{
- return pm_runtime_suspended(dev);
+ /*
+ * If the ACPI companion device object is present for this device, it
+ * may be accessed during suspend and resume of other devices via I2C
+ * operation regions, so tell the PM core and middle layers to avoid
+ * skipping system suspend/resume callbacks for it in that case.
+ */
+ return !has_acpi_companion(dev);
}
static void dw_i2c_plat_complete(struct device *dev)
{
- if (dev->power.direct_complete)
+ /*
+ * The device can only be in runtime suspend at this point if it has not
+ * been resumed throughout the ending system suspend/resume cycle, so if
+	 * the platform firmware might have interfered with it, ask the runtime PM
+ * framework to resume it.
+ */
+ if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
pm_request_resume(dev);
}
#else
@@ -453,16 +471,9 @@ static int dw_i2c_plat_suspend(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
- if (i_dev->suspended) {
- i_dev->skip_resume = true;
- return 0;
- }
-
i_dev->disable(i_dev);
i2c_dw_plat_prepare_clk(i_dev, false);
- i_dev->suspended = true;
-
return 0;
}
@@ -470,19 +481,9 @@ static int dw_i2c_plat_resume(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
- if (!i_dev->suspended)
- return 0;
-
- if (i_dev->skip_resume) {
- i_dev->skip_resume = false;
- return 0;
- }
-
i2c_dw_plat_prepare_clk(i_dev, true);
i_dev->init(i_dev);
- i_dev->suspended = false;
-
return 0;
}
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 174579d..462948e 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -983,7 +983,7 @@ static void piix4_adap_remove(struct i2c_adapter *adap)
if (adapdata->smba) {
i2c_del_adapter(adap);
- if (adapdata->port == (0 << 1)) {
+ if (adapdata->port == (0 << piix4_port_shift_sb800)) {
release_region(adapdata->smba, SMBIOSIZE);
if (adapdata->sb800_main)
release_region(SB800_PIIX4_SMB_IDX, 2);
diff --git a/drivers/i2c/busses/i2c-stm32.h b/drivers/i2c/busses/i2c-stm32.h
index dab5176..d4f9cef 100644
--- a/drivers/i2c/busses/i2c-stm32.h
+++ b/drivers/i2c/busses/i2c-stm32.h
@@ -1,10 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* i2c-stm32.h
*
* Copyright (C) M'boumba Cedric Madianga 2017
+ * Copyright (C) STMicroelectronics 2017
* Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
*
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _I2C_STM32_H
diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c
index 4ec1084..47c8d00 100644
--- a/drivers/i2c/busses/i2c-stm32f4.c
+++ b/drivers/i2c/busses/i2c-stm32f4.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for STMicroelectronics STM32 I2C controller
*
@@ -6,11 +7,11 @@
* http://www.st.com/resource/en/reference_manual/DM00031020.pdf
*
* Copyright (C) M'boumba Cedric Madianga 2016
+ * Copyright (C) STMicroelectronics 2017
* Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
*
* This driver is based on i2c-st.c
*
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/clk.h>
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index d4a6e9c..b445b3b 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for STMicroelectronics STM32F7 I2C controller
*
@@ -7,11 +8,11 @@
* http://www.st.com/resource/en/reference_manual/dm00124865.pdf
*
* Copyright (C) M'boumba Cedric Madianga 2017
+ * Copyright (C) STMicroelectronics 2017
* Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
*
* This driver is based on i2c-stm32f4.c
*
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/clk.h>
#include <linux/delay.h>
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 706164b..f7829a7 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -821,8 +821,12 @@ void i2c_unregister_device(struct i2c_client *client)
{
if (!client)
return;
- if (client->dev.of_node)
+
+ if (client->dev.of_node) {
of_node_clear_flag(client->dev.of_node, OF_POPULATED);
+ of_node_put(client->dev.of_node);
+ }
+
if (ACPI_COMPANION(&client->dev))
acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
device_unregister(&client->dev);
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index 4bb9927..a1082c0 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -397,16 +397,17 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
the underlying bus driver */
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
+ if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
+ dev_err(&adapter->dev, "Invalid block %s size %d\n",
+ read_write == I2C_SMBUS_READ ? "read" : "write",
+ data->block[0]);
+ return -EINVAL;
+ }
+
if (read_write == I2C_SMBUS_READ) {
msg[1].len = data->block[0];
} else {
msg[0].len = data->block[0] + 1;
- if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 1) {
- dev_err(&adapter->dev,
- "Invalid block write size %d\n",
- data->block[0]);
- return -EINVAL;
- }
for (i = 1; i <= data->block[0]; i++)
msgbuf0[i] = data->block[i];
}
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index ef86296..39e3b34 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -629,6 +629,18 @@ config SPEAR_ADC
To compile this driver as a module, choose M here: the
module will be called spear_adc.
+config SD_ADC_MODULATOR
+ tristate "Generic sigma delta modulator"
+ depends on OF
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+	  Select this option to enable support for generic sigma delta
+	  modulators.
+
+ This driver can also be built as a module. If so, the module
+ will be called sd_adc_modulator.
+
config STM32_ADC_CORE
tristate "STMicroelectronics STM32 adc core"
depends on ARCH_STM32 || COMPILE_TEST
@@ -656,6 +668,31 @@ config STM32_ADC
This driver can also be built as a module. If so, the module
will be called stm32-adc.
+config STM32_DFSDM_CORE
+ tristate "STMicroelectronics STM32 DFSDM core"
+ depends on (ARCH_STM32 && OF) || COMPILE_TEST
+ select REGMAP
+ select REGMAP_MMIO
+ help
+	  Select this option to enable the driver for the STMicroelectronics
+	  STM32 digital filter for sigma delta converter (DFSDM).
+
+ This driver can also be built as a module. If so, the module
+ will be called stm32-dfsdm-core.
+
+config STM32_DFSDM_ADC
+ tristate "STMicroelectronics STM32 dfsdm adc"
+ depends on (ARCH_STM32 && OF) || COMPILE_TEST
+ select STM32_DFSDM_CORE
+ select REGMAP_MMIO
+ select IIO_BUFFER_HW_CONSUMER
+ help
+	  Select this option to support an ADC sigma delta modulator connected
+	  to the STMicroelectronics STM32 digital filter for sigma delta
+	  converter (DFSDM).
+
+ This driver can also be built as a module. If so, the module
+ will be called stm32-dfsdm-adc.
+
config STX104
tristate "Apex Embedded Systems STX104 driver"
depends on PC104 && X86 && ISA_BUS_API
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 9572c10..28a9423 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -64,6 +64,8 @@ obj-$(CONFIG_STX104) += stx104.o
obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o
obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
obj-$(CONFIG_STM32_ADC) += stm32-adc.o
+obj-$(CONFIG_STM32_DFSDM_CORE) += stm32-dfsdm-core.o
+obj-$(CONFIG_STM32_DFSDM_ADC) += stm32-dfsdm-adc.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
obj-$(CONFIG_TI_ADC0832) += ti-adc0832.o
obj-$(CONFIG_TI_ADC084S021) += ti-adc084s021.o
@@ -82,3 +84,4 @@ obj-$(CONFIG_VF610_ADC) += vf610_adc.o
obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o
obj-$(CONFIG_XILINX_XADC) += xilinx-xadc.o
+obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o
diff --git a/drivers/iio/adc/sd_adc_modulator.c b/drivers/iio/adc/sd_adc_modulator.c
new file mode 100644
index 0000000..560d8c7
--- /dev/null
+++ b/drivers/iio/adc/sd_adc_modulator.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic sigma delta modulator driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author: Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#include <linux/iio/iio.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+static const struct iio_info iio_sd_mod_iio_info;
+
+static const struct iio_chan_spec iio_sd_mod_ch = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 1,
+ .shift = 0,
+ },
+};
+
+static int iio_sd_mod_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iio_dev *iio;
+
+ iio = devm_iio_device_alloc(dev, 0);
+ if (!iio)
+ return -ENOMEM;
+
+ iio->dev.parent = dev;
+ iio->dev.of_node = dev->of_node;
+ iio->name = dev_name(dev);
+ iio->info = &iio_sd_mod_iio_info;
+ iio->modes = INDIO_BUFFER_HARDWARE;
+
+ iio->num_channels = 1;
+ iio->channels = &iio_sd_mod_ch;
+
+ platform_set_drvdata(pdev, iio);
+
+ return devm_iio_device_register(&pdev->dev, iio);
+}
+
+static const struct of_device_id sd_adc_of_match[] = {
+ { .compatible = "sd-modulator" },
+ { .compatible = "ads1201" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sd_adc_of_match);
+
+static struct platform_driver iio_sd_mod_adc = {
+ .driver = {
+ .name = "iio_sd_adc_mod",
+ .of_match_table = of_match_ptr(sd_adc_of_match),
+ },
+ .probe = iio_sd_mod_probe,
+};
+
+module_platform_driver(iio_sd_mod_adc);
+
+MODULE_DESCRIPTION("Basic sigma delta modulator");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
new file mode 100644
index 0000000..daa026d
--- /dev/null
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -0,0 +1,1205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is the ADC part of the STM32 DFSDM driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author: Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/hw-consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "stm32-dfsdm.h"
+
+#define DFSDM_DMA_BUFFER_SIZE (4 * PAGE_SIZE)
+
+/* Conversion timeout */
+#define DFSDM_TIMEOUT_US 100000
+#define DFSDM_TIMEOUT (msecs_to_jiffies(DFSDM_TIMEOUT_US / 1000))
+
+/* Oversampling attribute default */
+#define DFSDM_DEFAULT_OVERSAMPLING 100
+
+/* Oversampling max values */
+#define DFSDM_MAX_INT_OVERSAMPLING 256
+#define DFSDM_MAX_FL_OVERSAMPLING 1024
+
+/* Max sample resolutions */
+#define DFSDM_MAX_RES BIT(31)
+#define DFSDM_DATA_RES BIT(23)
+
+enum sd_converter_type {
+ DFSDM_AUDIO,
+ DFSDM_IIO,
+};
+
+struct stm32_dfsdm_dev_data {
+ int type;
+ int (*init)(struct iio_dev *indio_dev);
+ unsigned int num_channels;
+ const struct regmap_config *regmap_cfg;
+};
+
+struct stm32_dfsdm_adc {
+ struct stm32_dfsdm *dfsdm;
+ const struct stm32_dfsdm_dev_data *dev_data;
+ unsigned int fl_id;
+ unsigned int ch_id;
+
+ /* ADC specific */
+ unsigned int oversamp;
+ struct iio_hw_consumer *hwc;
+ struct completion completion;
+ u32 *buffer;
+
+ /* Audio specific */
+ unsigned int spi_freq; /* SPI bus clock frequency */
+ unsigned int sample_freq; /* Sample frequency after filter decimation */
+ int (*cb)(const void *data, size_t size, void *cb_priv);
+ void *cb_priv;
+
+ /* DMA */
+ u8 *rx_buf;
+ unsigned int bufi; /* Buffer current position */
+ unsigned int buf_sz; /* Buffer size */
+ struct dma_chan *dma_chan;
+ dma_addr_t dma_buf;
+};
+
+struct stm32_dfsdm_str2field {
+ const char *name;
+ unsigned int val;
+};
+
+/* DFSDM channel serial interface type */
+static const struct stm32_dfsdm_str2field stm32_dfsdm_chan_type[] = {
+ { "SPI_R", 0 }, /* SPI with data on rising edge */
+ { "SPI_F", 1 }, /* SPI with data on falling edge */
+ { "MANCH_R", 2 }, /* Manchester codec, rising edge = logic 0 */
+ { "MANCH_F", 3 }, /* Manchester codec, falling edge = logic 1 */
+ {},
+};
+
+/* DFSDM channel clock source */
+static const struct stm32_dfsdm_str2field stm32_dfsdm_chan_src[] = {
+ /* External SPI clock (CLKIN x) */
+ { "CLKIN", DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL },
+ /* Internal SPI clock (CLKOUT) */
+ { "CLKOUT", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL },
+ /* Internal SPI clock divided by 2 (falling edge) */
+ { "CLKOUT_F", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING },
+	/* Internal SPI clock divided by 2 (rising edge) */
+ { "CLKOUT_R", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING },
+ {},
+};
+
+static int stm32_dfsdm_str2val(const char *str,
+ const struct stm32_dfsdm_str2field *list)
+{
+ const struct stm32_dfsdm_str2field *p = list;
+
+ for (p = list; p && p->name; p++)
+ if (!strcmp(p->name, str))
+ return p->val;
+
+ return -EINVAL;
+}
+
+static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
+ unsigned int fast, unsigned int oversamp)
+{
+ unsigned int i, d, fosr, iosr;
+ u64 res;
+ s64 delta;
+ unsigned int m = 1; /* multiplication factor */
+ unsigned int p = fl->ford; /* filter order (ford) */
+
+ pr_debug("%s: Requested oversampling: %d\n", __func__, oversamp);
+ /*
+ * This function tries to compute filter oversampling and integrator
+	 * oversampling, based on the oversampling ratio requested by the user.
+ *
+ * Decimation d depends on the filter order and the oversampling ratios.
+ * ford: filter order
+ * fosr: filter over sampling ratio
+ * iosr: integrator over sampling ratio
+ */
+ if (fl->ford == DFSDM_FASTSINC_ORDER) {
+ m = 2;
+ p = 2;
+ }
+
+ /*
+	 * Look for filter and integrator oversampling ratios that reach a
+	 * 24-bit data output resolution.
+	 * Return as soon as the exact resolution is reached.
+	 * Otherwise the highest resolution below 32 bits is kept.
+ */
+ for (fosr = 1; fosr <= DFSDM_MAX_FL_OVERSAMPLING; fosr++) {
+ for (iosr = 1; iosr <= DFSDM_MAX_INT_OVERSAMPLING; iosr++) {
+ if (fast)
+ d = fosr * iosr;
+ else if (fl->ford == DFSDM_FASTSINC_ORDER)
+ d = fosr * (iosr + 3) + 2;
+ else
+ d = fosr * (iosr - 1 + p) + p;
+
+ if (d > oversamp)
+ break;
+ else if (d != oversamp)
+ continue;
+ /*
+ * Check resolution (limited to signed 32 bits)
+ * res <= 2^31
+ * Sincx filters:
+ * res = m * fosr^p x iosr (with m=1, p=ford)
+ * FastSinc filter
+ * res = m * fosr^p x iosr (with m=2, p=2)
+ */
+ res = fosr;
+ for (i = p - 1; i > 0; i--) {
+ res = res * (u64)fosr;
+ if (res > DFSDM_MAX_RES)
+ break;
+ }
+ if (res > DFSDM_MAX_RES)
+ continue;
+ res = res * (u64)m * (u64)iosr;
+ if (res > DFSDM_MAX_RES)
+ continue;
+
+ delta = res - DFSDM_DATA_RES;
+
+ if (res >= fl->res) {
+ fl->res = res;
+ fl->fosr = fosr;
+ fl->iosr = iosr;
+ fl->fast = fast;
+ pr_debug("%s: fosr = %d, iosr = %d\n",
+ __func__, fl->fosr, fl->iosr);
+ }
+
+ if (!delta)
+ return 0;
+ }
+ }
+
+ if (!fl->fosr)
+ return -EINVAL;
+
+ return 0;
+}
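
A worked numeric example of the search space above, runnable as plain C: a sinc3 filter (p = 3, m = 1) with fast = 0, fosr = 64 and iosr = 2 decimates by d = fosr * (iosr - 1 + p) + p = 259 and reaches a resolution of 524288, well under the 2^31 bound:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	unsigned int p = 3, fosr = 64, iosr = 2;
	uint64_t res = (uint64_t)fosr * fosr * fosr * iosr;	/* fosr^p * iosr */

	assert(fosr * (iosr - 1 + p) + p == 259);	/* decimation d */
	assert(res == 524288);
	return 0;
}
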
+
+static int stm32_dfsdm_start_channel(struct stm32_dfsdm *dfsdm,
+ unsigned int ch_id)
+{
+ return regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(ch_id),
+ DFSDM_CHCFGR1_CHEN_MASK,
+ DFSDM_CHCFGR1_CHEN(1));
+}
+
+static void stm32_dfsdm_stop_channel(struct stm32_dfsdm *dfsdm,
+ unsigned int ch_id)
+{
+ regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(ch_id),
+ DFSDM_CHCFGR1_CHEN_MASK, DFSDM_CHCFGR1_CHEN(0));
+}
+
+static int stm32_dfsdm_chan_configure(struct stm32_dfsdm *dfsdm,
+ struct stm32_dfsdm_channel *ch)
+{
+ unsigned int id = ch->id;
+ struct regmap *regmap = dfsdm->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, DFSDM_CHCFGR1(id),
+ DFSDM_CHCFGR1_SITP_MASK,
+ DFSDM_CHCFGR1_SITP(ch->type));
+ if (ret < 0)
+ return ret;
+ ret = regmap_update_bits(regmap, DFSDM_CHCFGR1(id),
+ DFSDM_CHCFGR1_SPICKSEL_MASK,
+ DFSDM_CHCFGR1_SPICKSEL(ch->src));
+ if (ret < 0)
+ return ret;
+ return regmap_update_bits(regmap, DFSDM_CHCFGR1(id),
+ DFSDM_CHCFGR1_CHINSEL_MASK,
+ DFSDM_CHCFGR1_CHINSEL(ch->alt_si));
+}
+
+static int stm32_dfsdm_start_filter(struct stm32_dfsdm *dfsdm,
+ unsigned int fl_id)
+{
+ int ret;
+
+ /* Enable filter */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_DFEN_MASK, DFSDM_CR1_DFEN(1));
+ if (ret < 0)
+ return ret;
+
+ /* Start conversion */
+ return regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_RSWSTART_MASK,
+ DFSDM_CR1_RSWSTART(1));
+}
+
+static void stm32_dfsdm_stop_filter(struct stm32_dfsdm *dfsdm, unsigned int fl_id)
+{
+ /* Disable conversion */
+ regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_DFEN_MASK, DFSDM_CR1_DFEN(0));
+}
+
+static int stm32_dfsdm_filter_configure(struct stm32_dfsdm *dfsdm,
+ unsigned int fl_id, unsigned int ch_id)
+{
+ struct regmap *regmap = dfsdm->regmap;
+ struct stm32_dfsdm_filter *fl = &dfsdm->fl_list[fl_id];
+ int ret;
+
+ /* Average integrator oversampling */
+ ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_IOSR_MASK,
+ DFSDM_FCR_IOSR(fl->iosr - 1));
+ if (ret)
+ return ret;
+
+ /* Filter order and Oversampling */
+ ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_FOSR_MASK,
+ DFSDM_FCR_FOSR(fl->fosr - 1));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_FORD_MASK,
+ DFSDM_FCR_FORD(fl->ford));
+ if (ret)
+ return ret;
+
+ /* No scan mode supported for the moment */
+ ret = regmap_update_bits(regmap, DFSDM_CR1(fl_id), DFSDM_CR1_RCH_MASK,
+ DFSDM_CR1_RCH(ch_id));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_RSYNC_MASK,
+ DFSDM_CR1_RSYNC(fl->sync_mode));
+}
+
+static int stm32_dfsdm_channel_parse_of(struct stm32_dfsdm *dfsdm,
+ struct iio_dev *indio_dev,
+ struct iio_chan_spec *ch)
+{
+ struct stm32_dfsdm_channel *df_ch;
+ const char *of_str;
+ int chan_idx = ch->scan_index;
+ int ret, val;
+
+ ret = of_property_read_u32_index(indio_dev->dev.of_node,
+ "st,adc-channels", chan_idx,
+ &ch->channel);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ " Error parsing 'st,adc-channels' for idx %d\n",
+ chan_idx);
+ return ret;
+ }
+ if (ch->channel >= dfsdm->num_chs) {
+ dev_err(&indio_dev->dev,
+ " Error bad channel number %d (max = %d)\n",
+ ch->channel, dfsdm->num_chs);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_string_index(indio_dev->dev.of_node,
+ "st,adc-channel-names", chan_idx,
+ &ch->datasheet_name);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ " Error parsing 'st,adc-channel-names' for idx %d\n",
+ chan_idx);
+ return ret;
+ }
+
+ df_ch = &dfsdm->ch_list[ch->channel];
+ df_ch->id = ch->channel;
+
+ ret = of_property_read_string_index(indio_dev->dev.of_node,
+ "st,adc-channel-types", chan_idx,
+ &of_str);
+ if (!ret) {
+ val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_type);
+ if (val < 0)
+ return val;
+ } else {
+ val = 0;
+ }
+ df_ch->type = val;
+
+ ret = of_property_read_string_index(indio_dev->dev.of_node,
+ "st,adc-channel-clk-src", chan_idx,
+ &of_str);
+ if (!ret) {
+ val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_src);
+ if (val < 0)
+ return val;
+ } else {
+ val = 0;
+ }
+ df_ch->src = val;
+
+ ret = of_property_read_u32_index(indio_dev->dev.of_node,
+ "st,adc-alt-channel", chan_idx,
+ &df_ch->alt_si);
+ if (ret < 0)
+ df_ch->alt_si = 0;
+
+ return 0;
+}
+
+static ssize_t dfsdm_adc_audio_get_spiclk(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", adc->spi_freq);
+}
+
+static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
+ struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id];
+ unsigned int sample_freq = adc->sample_freq;
+ unsigned int spi_freq;
+ int ret;
+
+ dev_err(&indio_dev->dev, "enter %s\n", __func__);
+ /* If DFSDM is master on SPI, SPI freq can not be updated */
+ if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+ return -EPERM;
+
+ ret = kstrtouint(buf, 0, &spi_freq);
+ if (ret)
+ return ret;
+
+ if (!spi_freq)
+ return -EINVAL;
+
+ if (sample_freq) {
+ if (spi_freq % sample_freq)
+ dev_warn(&indio_dev->dev,
+ "Sampling rate not accurate (%d)\n",
+ spi_freq / (spi_freq / sample_freq));
+
+ ret = stm32_dfsdm_set_osrs(fl, 0, (spi_freq / sample_freq));
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "No filter parameters that match!\n");
+ return ret;
+ }
+ }
+ adc->spi_freq = spi_freq;
+
+ return len;
+}
+
+static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc, bool dma)
+{
+ struct regmap *regmap = adc->dfsdm->regmap;
+ int ret;
+ unsigned int dma_en = 0, cont_en = 0;
+
+ ret = stm32_dfsdm_start_channel(adc->dfsdm, adc->ch_id);
+ if (ret < 0)
+ return ret;
+
+ ret = stm32_dfsdm_filter_configure(adc->dfsdm, adc->fl_id,
+ adc->ch_id);
+ if (ret < 0)
+ goto stop_channels;
+
+ if (dma) {
+ /* Enable DMA transfer */
+ dma_en = DFSDM_CR1_RDMAEN(1);
+ /* Enable continuous conversion, triggered by SPI clock */
+ cont_en = DFSDM_CR1_RCONT(1);
+ }
+ /* Apply DMA transfer setting */
+ ret = regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RDMAEN_MASK, dma_en);
+ if (ret < 0)
+ goto stop_channels;
+
+ /* Apply continuous conversion setting */
+ ret = regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RCONT_MASK, cont_en);
+ if (ret < 0)
+ goto stop_channels;
+
+ ret = stm32_dfsdm_start_filter(adc->dfsdm, adc->fl_id);
+ if (ret < 0)
+ goto stop_channels;
+
+ return 0;
+
+stop_channels:
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RDMAEN_MASK, 0);
+
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RCONT_MASK, 0);
+ stm32_dfsdm_stop_channel(adc->dfsdm, adc->ch_id);
+
+ return ret;
+}
+
+static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc)
+{
+ struct regmap *regmap = adc->dfsdm->regmap;
+
+ stm32_dfsdm_stop_filter(adc->dfsdm, adc->fl_id);
+
+ /* Clean conversion options */
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RDMAEN_MASK, 0);
+
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RCONT_MASK, 0);
+
+ stm32_dfsdm_stop_channel(adc->dfsdm, adc->ch_id);
+}
+
+static int stm32_dfsdm_set_watermark(struct iio_dev *indio_dev,
+ unsigned int val)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ unsigned int watermark = DFSDM_DMA_BUFFER_SIZE / 2;
+
+ /*
+ * DMA cyclic transfers are used, buffer is split into two periods.
+ * There should be :
+ * - always one buffer (period) DMA is working on
+ * - one buffer (period) driver pushed to ASoC side.
+ */
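+ /*
+ * Example (assuming DFSDM_DMA_BUFFER_SIZE is 4 kiB): a request of
+ * 256 u32 samples gives watermark = min(2048, 1024) = 1024 bytes,
+ * i.e. a 2048-byte buffer split into two 1024-byte DMA periods.
+ */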
+ watermark = min(watermark, val * (unsigned int)(sizeof(u32)));
+ adc->buf_sz = watermark * 2;
+
+ return 0;
+}
+
+static unsigned int stm32_dfsdm_adc_dma_residue(struct stm32_dfsdm_adc *adc)
+{
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = dmaengine_tx_status(adc->dma_chan,
+ adc->dma_chan->cookie,
+ &state);
+ if (status == DMA_IN_PROGRESS) {
+ /* Residue is size in bytes from end of buffer */
+ unsigned int i = adc->buf_sz - state.residue;
+ unsigned int size;
+
+ /* Return available bytes */
+ if (i >= adc->bufi)
+ size = i - adc->bufi;
+ else
+ size = adc->buf_sz + i - adc->bufi;
+
+ return size;
+ }
+
+ return 0;
+}
+
+static void stm32_dfsdm_audio_dma_buffer_done(void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int available = stm32_dfsdm_adc_dma_residue(adc);
+ size_t old_pos;
+
+ /*
+ * FIXME: The IIO kernel interface does not support cyclic DMA buffers
+ * and only offers an interface to push data sample by sample.
+ * For this reason the IIO buffer interface is not used here; it is
+ * bypassed through a private callback registered by ASoC.
+ * This is a temporary solution, pending cyclic DMA engine support
+ * in IIO.
+ */
+
+ dev_dbg(&indio_dev->dev, "%s: pos = %d, available = %d\n", __func__,
+ adc->bufi, available);
+ old_pos = adc->bufi;
+
+ while (available >= indio_dev->scan_bytes) {
+ u32 *buffer = (u32 *)&adc->rx_buf[adc->bufi];
+
+ /* Mask out the 8 LSBs that contain the channel ID */
+ *buffer &= 0xFFFFFF00;
+ available -= indio_dev->scan_bytes;
+ adc->bufi += indio_dev->scan_bytes;
+ if (adc->bufi >= adc->buf_sz) {
+ if (adc->cb)
+ adc->cb(&adc->rx_buf[old_pos],
+ adc->buf_sz - old_pos, adc->cb_priv);
+ adc->bufi = 0;
+ old_pos = 0;
+ }
+ }
+ if (adc->cb)
+ adc->cb(&adc->rx_buf[old_pos], adc->bufi - old_pos,
+ adc->cb_priv);
+}
+
+static int stm32_dfsdm_adc_dma_start(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ int ret;
+
+ if (!adc->dma_chan)
+ return -EINVAL;
+
+ dev_dbg(&indio_dev->dev, "%s size=%d watermark=%d\n", __func__,
+ adc->buf_sz, adc->buf_sz / 2);
+
+ /* Prepare a DMA cyclic transaction */
+ desc = dmaengine_prep_dma_cyclic(adc->dma_chan,
+ adc->dma_buf,
+ adc->buf_sz, adc->buf_sz / 2,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc)
+ return -EBUSY;
+
+ desc->callback = stm32_dfsdm_audio_dma_buffer_done;
+ desc->callback_param = indio_dev;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dmaengine_terminate_all(adc->dma_chan);
+ return ret;
+ }
+
+ /* Issue pending DMA requests */
+ dma_async_issue_pending(adc->dma_chan);
+
+ return 0;
+}
+
+static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ /* Reset adc buffer index */
+ adc->bufi = 0;
+
+ ret = stm32_dfsdm_start_dfsdm(adc->dfsdm);
+ if (ret < 0)
+ return ret;
+
+ ret = stm32_dfsdm_start_conv(adc, true);
+ if (ret) {
+ dev_err(&indio_dev->dev, "Can't start conversion\n");
+ goto stop_dfsdm;
+ }
+
+ if (adc->dma_chan) {
+ ret = stm32_dfsdm_adc_dma_start(indio_dev);
+ if (ret) {
+ dev_err(&indio_dev->dev, "Can't start DMA\n");
+ goto err_stop_conv;
+ }
+ }
+
+ return 0;
+
+err_stop_conv:
+ stm32_dfsdm_stop_conv(adc);
+stop_dfsdm:
+ stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+
+ return ret;
+}
+
+static int stm32_dfsdm_predisable(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+ if (adc->dma_chan)
+ dmaengine_terminate_all(adc->dma_chan);
+
+ stm32_dfsdm_stop_conv(adc);
+
+ stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops stm32_dfsdm_buffer_setup_ops = {
+ .postenable = &stm32_dfsdm_postenable,
+ .predisable = &stm32_dfsdm_predisable,
+};
+
+/**
+ * stm32_dfsdm_get_buff_cb() - register a callback called when a DMA
+ * transfer period completes.
+ *
+ * @iio_dev: Handle to IIO device.
+ * @cb: Pointer to callback function:
+ * - data: pointer to data buffer
+ * - size: size in bytes of the data buffer
+ * - private: pointer to consumer private structure.
+ * @private: Pointer to consumer private structure.
+ */
+int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev,
+ int (*cb)(const void *data, size_t size,
+ void *private),
+ void *private)
+{
+ struct stm32_dfsdm_adc *adc;
+
+ if (!iio_dev)
+ return -EINVAL;
+ adc = iio_priv(iio_dev);
+
+ adc->cb = cb;
+ adc->cb_priv = private;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_get_buff_cb);
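+
+/*
+ * Usage sketch from an ASoC platform driver (the callback name and the
+ * private pointer below are hypothetical):
+ *
+ *   static int dai_buff_cb(const void *data, size_t size, void *private)
+ *   {
+ *           // push 'size' bytes of PCM samples to the audio stream
+ *           return 0;
+ *   }
+ *   ...
+ *   stm32_dfsdm_get_buff_cb(iio_dev, dai_buff_cb, priv);
+ */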
+
+/**
+ * stm32_dfsdm_release_buff_cb - unregister buffer callback
+ *
+ * @iio_dev: Handle to IIO device.
+ */
+int stm32_dfsdm_release_buff_cb(struct iio_dev *iio_dev)
+{
+ struct stm32_dfsdm_adc *adc;
+
+ if (!iio_dev)
+ return -EINVAL;
+ adc = iio_priv(iio_dev);
+
+ adc->cb = NULL;
+ adc->cb_priv = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_release_buff_cb);
+
+static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *res)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ long timeout;
+ int ret;
+
+ reinit_completion(&adc->completion);
+
+ adc->buffer = res;
+
+ ret = stm32_dfsdm_start_dfsdm(adc->dfsdm);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
+ DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(1));
+ if (ret < 0)
+ goto stop_dfsdm;
+
+ ret = stm32_dfsdm_start_conv(adc, false);
+ if (ret < 0) {
+ regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
+ DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0));
+ goto stop_dfsdm;
+ }
+
+ timeout = wait_for_completion_interruptible_timeout(&adc->completion,
+ DFSDM_TIMEOUT);
+
+ /* Mask regular end-of-conversion IRQ */
+ regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
+ DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0));
+
+ if (timeout == 0)
+ ret = -ETIMEDOUT;
+ else if (timeout < 0)
+ ret = timeout;
+ else
+ ret = IIO_VAL_INT;
+
+ stm32_dfsdm_stop_conv(adc);
+
+stop_dfsdm:
+ stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+
+ return ret;
+}
+
+static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
+ struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id];
+ unsigned int spi_freq = adc->spi_freq;
+ int ret = -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = stm32_dfsdm_set_osrs(fl, 0, val);
+ if (!ret)
+ adc->oversamp = val;
+
+ return ret;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (!val)
+ return -EINVAL;
+ if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+ spi_freq = adc->dfsdm->spi_master_freq;
+
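+ /*
+ * e.g. with spi_freq = 2048000 Hz, a requested rate of 44100 Hz
+ * gives an oversampling ratio of 46 and an effective rate of
+ * 2048000 / 46 = 44521 Hz, hence the warning below.
+ */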
+ if (spi_freq % val)
+ dev_warn(&indio_dev->dev,
+ "Sampling rate not accurate (%d)\n",
+ spi_freq / (spi_freq / val));
+
+ ret = stm32_dfsdm_set_osrs(fl, 0, (spi_freq / val));
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "Not able to find parameter that match!\n");
+ return ret;
+ }
+ adc->sample_freq = val;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_hw_consumer_enable(adc->hwc);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "%s: IIO enable failed (channel %d)\n",
+ __func__, chan->channel);
+ return ret;
+ }
+ ret = stm32_dfsdm_single_conv(indio_dev, chan, val);
+ iio_hw_consumer_disable(adc->hwc);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "%s: Conversion failed (channel %d)\n",
+ __func__, chan->channel);
+ return ret;
+ }
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = adc->oversamp;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = adc->sample_freq;
+
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info stm32_dfsdm_info_audio = {
+ .hwfifo_set_watermark = stm32_dfsdm_set_watermark,
+ .read_raw = stm32_dfsdm_read_raw,
+ .write_raw = stm32_dfsdm_write_raw,
+};
+
+static const struct iio_info stm32_dfsdm_info_adc = {
+ .read_raw = stm32_dfsdm_read_raw,
+ .write_raw = stm32_dfsdm_write_raw,
+};
+
+static irqreturn_t stm32_dfsdm_irq(int irq, void *arg)
+{
+ struct stm32_dfsdm_adc *adc = arg;
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+ struct regmap *regmap = adc->dfsdm->regmap;
+ unsigned int status, int_en;
+
+ regmap_read(regmap, DFSDM_ISR(adc->fl_id), &status);
+ regmap_read(regmap, DFSDM_CR2(adc->fl_id), &int_en);
+
+ if (status & DFSDM_ISR_REOCF_MASK) {
+ /* Read the data register to clear the IRQ status */
+ regmap_read(regmap, DFSDM_RDATAR(adc->fl_id), adc->buffer);
+ complete(&adc->completion);
+ }
+
+ if (status & DFSDM_ISR_ROVRF_MASK) {
+ if (int_en & DFSDM_CR2_ROVRIE_MASK)
+ dev_warn(&indio_dev->dev, "Overrun detected\n");
+ regmap_update_bits(regmap, DFSDM_ICR(adc->fl_id),
+ DFSDM_ICR_CLRROVRF_MASK,
+ DFSDM_ICR_CLRROVRF_MASK);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Define external info for SPI frequency and audio sampling rate that can be
+ * configured by the ASoC driver through the consumer.h API
+ */
+static const struct iio_chan_spec_ext_info dfsdm_adc_audio_ext_info[] = {
+ /* spi_clk_freq: clock frequency on SPI/Manchester bus used by channel */
+ {
+ .name = "spi_clk_freq",
+ .shared = IIO_SHARED_BY_TYPE,
+ .read = dfsdm_adc_audio_get_spiclk,
+ .write = dfsdm_adc_audio_set_spiclk,
+ },
+ {},
+};
+
+static void stm32_dfsdm_dma_release(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+ if (adc->dma_chan) {
+ dma_free_coherent(adc->dma_chan->device->dev,
+ DFSDM_DMA_BUFFER_SIZE,
+ adc->rx_buf, adc->dma_buf);
+ dma_release_channel(adc->dma_chan);
+ }
+}
+
+static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct dma_slave_config config = {
+ .src_addr = (dma_addr_t)adc->dfsdm->phys_base +
+ DFSDM_RDATAR(adc->fl_id),
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ };
+ int ret;
+
+ adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx");
+ if (!adc->dma_chan)
+ return -EINVAL;
+
+ adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev,
+ DFSDM_DMA_BUFFER_SIZE,
+ &adc->dma_buf, GFP_KERNEL);
+ if (!adc->rx_buf) {
+ ret = -ENOMEM;
+ goto err_release;
+ }
+
+ ret = dmaengine_slave_config(adc->dma_chan, &config);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dma_free_coherent(adc->dma_chan->device->dev, DFSDM_DMA_BUFFER_SIZE,
+ adc->rx_buf, adc->dma_buf);
+err_release:
+ dma_release_channel(adc->dma_chan);
+
+ return ret;
+}
+
+static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
+ struct iio_chan_spec *ch)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ ret = stm32_dfsdm_channel_parse_of(adc->dfsdm, indio_dev, ch);
+ if (ret < 0)
+ return ret;
+
+ ch->type = IIO_VOLTAGE;
+ ch->indexed = 1;
+
+ /*
+ * IIO_CHAN_INFO_RAW: used to compute regular conversion
+ * IIO_CHAN_INFO_OVERSAMPLING_RATIO: used to set oversampling
+ */
+ ch->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ ch->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
+
+ if (adc->dev_data->type == DFSDM_AUDIO) {
+ ch->scan_type.sign = 's';
+ ch->ext_info = dfsdm_adc_audio_ext_info;
+ } else {
+ ch->scan_type.sign = 'u';
+ }
+ ch->scan_type.realbits = 24;
+ ch->scan_type.storagebits = 32;
+ adc->ch_id = ch->channel;
+
+ return stm32_dfsdm_chan_configure(adc->dfsdm,
+ &adc->dfsdm->ch_list[ch->channel]);
+}
+
+static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
+{
+ struct iio_chan_spec *ch;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct stm32_dfsdm_channel *d_ch;
+ int ret;
+
+ indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
+ indio_dev->setup_ops = &stm32_dfsdm_buffer_setup_ops;
+
+ ch = devm_kzalloc(&indio_dev->dev, sizeof(*ch), GFP_KERNEL);
+ if (!ch)
+ return -ENOMEM;
+
+ ch->scan_index = 0;
+
+ ret = stm32_dfsdm_adc_chan_init_one(indio_dev, ch);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Channels init failed\n");
+ return ret;
+ }
+ ch->info_mask_separate = BIT(IIO_CHAN_INFO_SAMP_FREQ);
+
+ d_ch = &adc->dfsdm->ch_list[adc->ch_id];
+ if (d_ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+ adc->spi_freq = adc->dfsdm->spi_master_freq;
+
+ indio_dev->num_channels = 1;
+ indio_dev->channels = ch;
+
+ return stm32_dfsdm_dma_request(indio_dev);
+}
+
+static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
+{
+ struct iio_chan_spec *ch;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int num_ch;
+ int ret, chan_idx;
+
+ adc->oversamp = DFSDM_DEFAULT_OVERSAMPLING;
+ ret = stm32_dfsdm_set_osrs(&adc->dfsdm->fl_list[adc->fl_id], 0,
+ adc->oversamp);
+ if (ret < 0)
+ return ret;
+
+ num_ch = of_property_count_u32_elems(indio_dev->dev.of_node,
+ "st,adc-channels");
+ if (num_ch < 0 || num_ch > adc->dfsdm->num_chs) {
+ dev_err(&indio_dev->dev, "Bad st,adc-channels\n");
+ return num_ch < 0 ? num_ch : -EINVAL;
+ }
+
+ /* Bind to SD modulator IIO device */
+ adc->hwc = devm_iio_hw_consumer_alloc(&indio_dev->dev);
+ if (IS_ERR(adc->hwc))
+ return -EPROBE_DEFER;
+
+ ch = devm_kcalloc(&indio_dev->dev, num_ch, sizeof(*ch),
+ GFP_KERNEL);
+ if (!ch)
+ return -ENOMEM;
+
+ for (chan_idx = 0; chan_idx < num_ch; chan_idx++) {
+ ch->scan_index = chan_idx;
+ ret = stm32_dfsdm_adc_chan_init_one(indio_dev, ch);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Channels init failed\n");
+ return ret;
+ }
+ }
+
+ indio_dev->num_channels = num_ch;
+ indio_dev->channels = ch;
+
+ init_completion(&adc->completion);
+
+ return 0;
+}
+
+static const struct stm32_dfsdm_dev_data stm32h7_dfsdm_adc_data = {
+ .type = DFSDM_IIO,
+ .init = stm32_dfsdm_adc_init,
+};
+
+static const struct stm32_dfsdm_dev_data stm32h7_dfsdm_audio_data = {
+ .type = DFSDM_AUDIO,
+ .init = stm32_dfsdm_audio_init,
+};
+
+static const struct of_device_id stm32_dfsdm_adc_match[] = {
+ {
+ .compatible = "st,stm32-dfsdm-adc",
+ .data = &stm32h7_dfsdm_adc_data,
+ },
+ {
+ .compatible = "st,stm32-dfsdm-dmic",
+ .data = &stm32h7_dfsdm_audio_data,
+ },
+ {}
+};
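+
+/*
+ * Illustrative device tree node for a filter instance (property values
+ * are examples only):
+ *
+ *   dfsdm0_adc: filter@0 {
+ *           compatible = "st,stm32-dfsdm-adc";
+ *           reg = <0>;
+ *           interrupts = <31>;
+ *           st,adc-channels = <0>;
+ *           st,adc-channel-names = "in0";
+ *           st,filter-order = <3>;
+ *   };
+ */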
+
+static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_dfsdm_adc *adc;
+ struct device_node *np = dev->of_node;
+ const struct stm32_dfsdm_dev_data *dev_data;
+ struct iio_dev *iio;
+ char *name;
+ int ret, irq, val;
+
+ dev_data = of_device_get_match_data(dev);
+ iio = devm_iio_device_alloc(dev, sizeof(*adc));
+ if (!iio) {
+ dev_err(dev, "%s: Failed to allocate IIO\n", __func__);
+ return -ENOMEM;
+ }
+
+ adc = iio_priv(iio);
+ adc->dfsdm = dev_get_drvdata(dev->parent);
+
+ iio->dev.parent = dev;
+ iio->dev.of_node = np;
+ iio->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
+
+ platform_set_drvdata(pdev, adc);
+
+ ret = of_property_read_u32(dev->of_node, "reg", &adc->fl_id);
+ if (ret != 0) {
+ dev_err(dev, "Missing reg property\n");
+ return -EINVAL;
+ }
+
+ name = devm_kzalloc(dev, sizeof("dfsdm-adc0"), GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ if (dev_data->type == DFSDM_AUDIO) {
+ iio->info = &stm32_dfsdm_info_audio;
+ snprintf(name, sizeof("dfsdm-pdm0"), "dfsdm-pdm%d", adc->fl_id);
+ } else {
+ iio->info = &stm32_dfsdm_info_adc;
+ snprintf(name, sizeof("dfsdm-adc0"), "dfsdm-adc%d", adc->fl_id);
+ }
+ iio->name = name;
+
+ /*
+ * For now, IRQs generated by channels are not handled, so the IRQ
+ * associated with filter instance 0 is dedicated to filter 0.
+ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, stm32_dfsdm_irq,
+ 0, pdev->name, adc);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request IRQ\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "st,filter-order", &val);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set filter order\n");
+ return ret;
+ }
+
+ adc->dfsdm->fl_list[adc->fl_id].ford = val;
+
+ ret = of_property_read_u32(dev->of_node, "st,filter0-sync", &val);
+ if (!ret)
+ adc->dfsdm->fl_list[adc->fl_id].sync_mode = val;
+
+ adc->dev_data = dev_data;
+ ret = dev_data->init(iio);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(iio);
+ if (ret < 0)
+ goto err_cleanup;
+
+ dev_err(dev, "of_platform_populate\n");
+ if (dev_data->type == DFSDM_AUDIO) {
+ ret = of_platform_populate(np, NULL, NULL, dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to find an audio DAI\n");
+ goto err_unregister;
+ }
+ }
+
+ return 0;
+
+err_unregister:
+ iio_device_unregister(iio);
+err_cleanup:
+ stm32_dfsdm_dma_release(iio);
+
+ return ret;
+}
+
+static int stm32_dfsdm_adc_remove(struct platform_device *pdev)
+{
+ struct stm32_dfsdm_adc *adc = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+
+ if (adc->dev_data->type == DFSDM_AUDIO)
+ of_platform_depopulate(&pdev->dev);
+ iio_device_unregister(indio_dev);
+ stm32_dfsdm_dma_release(indio_dev);
+
+ return 0;
+}
+
+static struct platform_driver stm32_dfsdm_adc_driver = {
+ .driver = {
+ .name = "stm32-dfsdm-adc",
+ .of_match_table = stm32_dfsdm_adc_match,
+ },
+ .probe = stm32_dfsdm_adc_probe,
+ .remove = stm32_dfsdm_adc_remove,
+};
+module_platform_driver(stm32_dfsdm_adc_driver);
+
+MODULE_DESCRIPTION("STM32 sigma delta ADC");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
new file mode 100644
index 0000000..6290332
--- /dev/null
+++ b/drivers/iio/adc/stm32-dfsdm-core.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is the core part of the STM32 DFSDM driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com> for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "stm32-dfsdm.h"
+
+struct stm32_dfsdm_dev_data {
+ unsigned int num_filters;
+ unsigned int num_channels;
+ const struct regmap_config *regmap_cfg;
+};
+
+#define STM32H7_DFSDM_NUM_FILTERS 4
+#define STM32H7_DFSDM_NUM_CHANNELS 8
+
+static bool stm32_dfsdm_volatile_reg(struct device *dev, unsigned int reg)
+{
+ if (reg < DFSDM_FILTER_BASE_ADR)
+ return false;
+
+ /*
+ * The register offset is masked to avoid listing the registers of
+ * every filter instance.
+ */
+ switch (reg & DFSDM_FILTER_REG_MASK) {
+ case DFSDM_CR1(0) & DFSDM_FILTER_REG_MASK:
+ case DFSDM_ISR(0) & DFSDM_FILTER_REG_MASK:
+ case DFSDM_JDATAR(0) & DFSDM_FILTER_REG_MASK:
+ case DFSDM_RDATAR(0) & DFSDM_FILTER_REG_MASK:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_config stm32h7_dfsdm_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = sizeof(u32),
+ .max_register = 0x2B8,
+ .volatile_reg = stm32_dfsdm_volatile_reg,
+ .fast_io = true,
+};
+
+static const struct stm32_dfsdm_dev_data stm32h7_dfsdm_data = {
+ .num_filters = STM32H7_DFSDM_NUM_FILTERS,
+ .num_channels = STM32H7_DFSDM_NUM_CHANNELS,
+ .regmap_cfg = &stm32h7_dfsdm_regmap_cfg,
+};
+
+struct dfsdm_priv {
+ struct platform_device *pdev; /* platform device */
+
+ struct stm32_dfsdm dfsdm; /* common data exported for all instances */
+
+ unsigned int spi_clk_out_div; /* SPI clkout divider value */
+ atomic_t n_active_ch; /* number of current active channels */
+
+ struct clk *clk; /* DFSDM clock */
+ struct clk *aclk; /* audio clock */
+};
+
+/**
+ * stm32_dfsdm_start_dfsdm - start global DFSDM interface.
+ * @dfsdm: Handle used to retrieve dfsdm context.
+ *
+ * Enable the interface when the first channel becomes active.
+ */
+int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)
+{
+ struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm);
+ struct device *dev = &priv->pdev->dev;
+ unsigned int clk_div = priv->spi_clk_out_div;
+ int ret;
+
+ if (atomic_inc_return(&priv->n_active_ch) == 1) {
+ ret = clk_prepare_enable(priv->clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to start clock\n");
+ goto error_ret;
+ }
+ if (priv->aclk) {
+ ret = clk_prepare_enable(priv->aclk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to start audio clock\n");
+ goto disable_clk;
+ }
+ }
+
+ /* Output the SPI CLKOUT (if clk_div == 0 the clock is off) */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_CKOUTDIV_MASK,
+ DFSDM_CHCFGR1_CKOUTDIV(clk_div));
+ if (ret < 0)
+ goto disable_aclk;
+
+ /* Global enable of DFSDM interface */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_DFSDMEN_MASK,
+ DFSDM_CHCFGR1_DFSDMEN(1));
+ if (ret < 0)
+ goto disable_aclk;
+ }
+
+ dev_dbg(dev, "%s: n_active_ch %d\n", __func__,
+ atomic_read(&priv->n_active_ch));
+
+ return 0;
+
+disable_aclk:
+ clk_disable_unprepare(priv->aclk);
+disable_clk:
+ clk_disable_unprepare(priv->clk);
+
+error_ret:
+ atomic_dec(&priv->n_active_ch);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_start_dfsdm);
+
+/**
+ * stm32_dfsdm_stop_dfsdm - stop global DFSDM interface.
+ * @dfsdm: Handle used to retrieve dfsdm context.
+ *
+ * Disable the interface when the last active channel is released.
+ */
+int stm32_dfsdm_stop_dfsdm(struct stm32_dfsdm *dfsdm)
+{
+ struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm);
+ int ret;
+
+ if (atomic_dec_and_test(&priv->n_active_ch)) {
+ /* Global disable of DFSDM interface */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_DFSDMEN_MASK,
+ DFSDM_CHCFGR1_DFSDMEN(0));
+ if (ret < 0)
+ return ret;
+
+ /* Stop SPI CLKOUT */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_CKOUTDIV_MASK,
+ DFSDM_CHCFGR1_CKOUTDIV(0));
+ if (ret < 0)
+ return ret;
+
+ clk_disable_unprepare(priv->clk);
+ if (priv->aclk)
+ clk_disable_unprepare(priv->aclk);
+ }
+ dev_dbg(&priv->pdev->dev, "%s: n_active_ch %d\n", __func__,
+ atomic_read(&priv->n_active_ch));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_stop_dfsdm);
+
+static int stm32_dfsdm_parse_of(struct platform_device *pdev,
+ struct dfsdm_priv *priv)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res;
+ unsigned long clk_freq;
+ unsigned int spi_freq, rem;
+ int ret;
+
+ if (!node)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get memory resource\n");
+ return -ENODEV;
+ }
+ priv->dfsdm.phys_base = res->start;
+ priv->dfsdm.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->dfsdm.base))
+ return PTR_ERR(priv->dfsdm.base);
+
+ /*
+ * "dfsdm" clock is mandatory for DFSDM peripheral clocking.
+ * "dfsdm" or "audio" clocks can be used as source clock for
+ * the SPI clock out signal and internal processing, depending
+ * on use case.
+ */
+ priv->clk = devm_clk_get(&pdev->dev, "dfsdm");
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "No stm32_dfsdm_clk clock found\n");
+ return -EINVAL;
+ }
+
+ priv->aclk = devm_clk_get(&pdev->dev, "audio");
+ if (IS_ERR(priv->aclk))
+ priv->aclk = NULL;
+
+ if (priv->aclk)
+ clk_freq = clk_get_rate(priv->aclk);
+ else
+ clk_freq = clk_get_rate(priv->clk);
+
+ /* SPI clock out frequency */
+ ret = of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
+ &spi_freq);
+ if (ret < 0) {
+ /* No SPI master mode */
+ return 0;
+ }
+
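+ /*
+ * e.g. with a 49.152 MHz source clock (assumed value) and
+ * spi-max-frequency = <2048000>, the divider computed below is
+ * 49152000 / 2048000 - 1 = 23, with an exact division (rem = 0).
+ */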
+ priv->spi_clk_out_div = div_u64_rem(clk_freq, spi_freq, &rem) - 1;
+ priv->dfsdm.spi_master_freq = spi_freq;
+
+ if (rem) {
+ dev_warn(&pdev->dev, "SPI clock not accurate\n");
+ dev_warn(&pdev->dev, "%ld = %d * %d + %d\n",
+ clk_freq, spi_freq, priv->spi_clk_out_div + 1, rem);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id stm32_dfsdm_of_match[] = {
+ {
+ .compatible = "st,stm32h7-dfsdm",
+ .data = &stm32h7_dfsdm_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_dfsdm_of_match);
+
+static int stm32_dfsdm_probe(struct platform_device *pdev)
+{
+ struct dfsdm_priv *priv;
+ const struct stm32_dfsdm_dev_data *dev_data;
+ struct stm32_dfsdm *dfsdm;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdev = pdev;
+
+ dev_data = of_device_get_match_data(&pdev->dev);
+
+ dfsdm = &priv->dfsdm;
+ dfsdm->fl_list = devm_kcalloc(&pdev->dev, dev_data->num_filters,
+ sizeof(*dfsdm->fl_list), GFP_KERNEL);
+ if (!dfsdm->fl_list)
+ return -ENOMEM;
+
+ dfsdm->num_fls = dev_data->num_filters;
+ dfsdm->ch_list = devm_kcalloc(&pdev->dev, dev_data->num_channels,
+ sizeof(*dfsdm->ch_list),
+ GFP_KERNEL);
+ if (!dfsdm->ch_list)
+ return -ENOMEM;
+ dfsdm->num_chs = dev_data->num_channels;
+
+ ret = stm32_dfsdm_parse_of(pdev, priv);
+ if (ret < 0)
+ return ret;
+
+ dfsdm->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dfsdm",
+ dfsdm->base,
+ &stm32h7_dfsdm_regmap_cfg);
+ if (IS_ERR(dfsdm->regmap)) {
+ ret = PTR_ERR(dfsdm->regmap);
+ dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, dfsdm);
+
+ return devm_of_platform_populate(&pdev->dev);
+}
+
+static struct platform_driver stm32_dfsdm_driver = {
+ .probe = stm32_dfsdm_probe,
+ .driver = {
+ .name = "stm32-dfsdm",
+ .of_match_table = stm32_dfsdm_of_match,
+ },
+};
+
+module_platform_driver(stm32_dfsdm_driver);
+
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 dfsdm driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/stm32-dfsdm.h b/drivers/iio/adc/stm32-dfsdm.h
new file mode 100644
index 0000000..8708394
--- /dev/null
+++ b/drivers/iio/adc/stm32-dfsdm.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file is part of STM32 DFSDM driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#ifndef MDF_STM32_DFSDM__H
+#define MDF_STM32_DFSDM__H
+
+#include <linux/bitfield.h>
+
+/*
+ * STM32 DFSDM - global register map
+ * ________________________________________________________
+ * | Offset | Registers block |
+ * --------------------------------------------------------
+ * | 0x000 | CHANNEL 0 + COMMON CHANNEL FIELDS |
+ * --------------------------------------------------------
+ * | 0x020 | CHANNEL 1 |
+ * --------------------------------------------------------
+ * | ... | ..... |
+ * --------------------------------------------------------
+ * | 0x0E0 | CHANNEL 7 |
+ * --------------------------------------------------------
+ * | 0x100 | FILTER 0 + COMMON FILTER FIELDs |
+ * --------------------------------------------------------
+ * | 0x200 | FILTER 1 |
+ * --------------------------------------------------------
+ * | 0x300 | FILTER 2 |
+ * --------------------------------------------------------
+ * | 0x400 | FILTER 3 |
+ * --------------------------------------------------------
+ */
+
+/*
+ * Channels register definitions
+ */
+#define DFSDM_CHCFGR1(y) ((y) * 0x20 + 0x00)
+#define DFSDM_CHCFGR2(y) ((y) * 0x20 + 0x04)
+#define DFSDM_AWSCDR(y) ((y) * 0x20 + 0x08)
+#define DFSDM_CHWDATR(y) ((y) * 0x20 + 0x0C)
+#define DFSDM_CHDATINR(y) ((y) * 0x20 + 0x10)
+
+/* CHCFGR1: Channel configuration register 1 */
+#define DFSDM_CHCFGR1_SITP_MASK GENMASK(1, 0)
+#define DFSDM_CHCFGR1_SITP(v) FIELD_PREP(DFSDM_CHCFGR1_SITP_MASK, v)
+#define DFSDM_CHCFGR1_SPICKSEL_MASK GENMASK(3, 2)
+#define DFSDM_CHCFGR1_SPICKSEL(v) FIELD_PREP(DFSDM_CHCFGR1_SPICKSEL_MASK, v)
+#define DFSDM_CHCFGR1_SCDEN_MASK BIT(5)
+#define DFSDM_CHCFGR1_SCDEN(v) FIELD_PREP(DFSDM_CHCFGR1_SCDEN_MASK, v)
+#define DFSDM_CHCFGR1_CKABEN_MASK BIT(6)
+#define DFSDM_CHCFGR1_CKABEN(v) FIELD_PREP(DFSDM_CHCFGR1_CKABEN_MASK, v)
+#define DFSDM_CHCFGR1_CHEN_MASK BIT(7)
+#define DFSDM_CHCFGR1_CHEN(v) FIELD_PREP(DFSDM_CHCFGR1_CHEN_MASK, v)
+#define DFSDM_CHCFGR1_CHINSEL_MASK BIT(8)
+#define DFSDM_CHCFGR1_CHINSEL(v) FIELD_PREP(DFSDM_CHCFGR1_CHINSEL_MASK, v)
+#define DFSDM_CHCFGR1_DATMPX_MASK GENMASK(13, 12)
+#define DFSDM_CHCFGR1_DATMPX(v) FIELD_PREP(DFSDM_CHCFGR1_DATMPX_MASK, v)
+#define DFSDM_CHCFGR1_DATPACK_MASK GENMASK(15, 14)
+#define DFSDM_CHCFGR1_DATPACK(v) FIELD_PREP(DFSDM_CHCFGR1_DATPACK_MASK, v)
+#define DFSDM_CHCFGR1_CKOUTDIV_MASK GENMASK(23, 16)
+#define DFSDM_CHCFGR1_CKOUTDIV(v) FIELD_PREP(DFSDM_CHCFGR1_CKOUTDIV_MASK, v)
+#define DFSDM_CHCFGR1_CKOUTSRC_MASK BIT(30)
+#define DFSDM_CHCFGR1_CKOUTSRC(v) FIELD_PREP(DFSDM_CHCFGR1_CKOUTSRC_MASK, v)
+#define DFSDM_CHCFGR1_DFSDMEN_MASK BIT(31)
+#define DFSDM_CHCFGR1_DFSDMEN(v) FIELD_PREP(DFSDM_CHCFGR1_DFSDMEN_MASK, v)
+
+/* CHCFGR2: Channel configuration register 2 */
+#define DFSDM_CHCFGR2_DTRBS_MASK GENMASK(7, 3)
+#define DFSDM_CHCFGR2_DTRBS(v) FIELD_PREP(DFSDM_CHCFGR2_DTRBS_MASK, v)
+#define DFSDM_CHCFGR2_OFFSET_MASK GENMASK(31, 8)
+#define DFSDM_CHCFGR2_OFFSET(v) FIELD_PREP(DFSDM_CHCFGR2_OFFSET_MASK, v)
+
+/* AWSCDR: Channel analog watchdog and short circuit detector */
+#define DFSDM_AWSCDR_SCDT_MASK GENMASK(7, 0)
+#define DFSDM_AWSCDR_SCDT(v) FIELD_PREP(DFSDM_AWSCDR_SCDT_MASK, v)
+#define DFSDM_AWSCDR_BKSCD_MASK GENMASK(15, 12)
+#define DFSDM_AWSCDR_BKSCD(v) FIELD_PREP(DFSDM_AWSCDR_BKSCD_MASK, v)
+#define DFSDM_AWSCDR_AWFOSR_MASK GENMASK(20, 16)
+#define DFSDM_AWSCDR_AWFOSR(v) FIELD_PREP(DFSDM_AWSCDR_AWFOSR_MASK, v)
+#define DFSDM_AWSCDR_AWFORD_MASK GENMASK(23, 22)
+#define DFSDM_AWSCDR_AWFORD(v) FIELD_PREP(DFSDM_AWSCDR_AWFORD_MASK, v)
+
+/*
+ * Filters register definitions
+ */
+#define DFSDM_FILTER_BASE_ADR 0x100
+#define DFSDM_FILTER_REG_MASK 0x7F
+#define DFSDM_FILTER_X_BASE_ADR(x) ((x) * 0x80 + DFSDM_FILTER_BASE_ADR)
+
+#define DFSDM_CR1(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x00)
+#define DFSDM_CR2(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x04)
+#define DFSDM_ISR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x08)
+#define DFSDM_ICR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x0C)
+#define DFSDM_JCHGR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x10)
+#define DFSDM_FCR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x14)
+#define DFSDM_JDATAR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x18)
+#define DFSDM_RDATAR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x1C)
+#define DFSDM_AWHTR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x20)
+#define DFSDM_AWLTR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x24)
+#define DFSDM_AWSR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x28)
+#define DFSDM_AWCFR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x2C)
+#define DFSDM_EXMAX(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x30)
+#define DFSDM_EXMIN(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x34)
+#define DFSDM_CNVTIMR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x38)
+
+/* CR1 Control register 1 */
+#define DFSDM_CR1_DFEN_MASK BIT(0)
+#define DFSDM_CR1_DFEN(v) FIELD_PREP(DFSDM_CR1_DFEN_MASK, v)
+#define DFSDM_CR1_JSWSTART_MASK BIT(1)
+#define DFSDM_CR1_JSWSTART(v) FIELD_PREP(DFSDM_CR1_JSWSTART_MASK, v)
+#define DFSDM_CR1_JSYNC_MASK BIT(3)
+#define DFSDM_CR1_JSYNC(v) FIELD_PREP(DFSDM_CR1_JSYNC_MASK, v)
+#define DFSDM_CR1_JSCAN_MASK BIT(4)
+#define DFSDM_CR1_JSCAN(v) FIELD_PREP(DFSDM_CR1_JSCAN_MASK, v)
+#define DFSDM_CR1_JDMAEN_MASK BIT(5)
+#define DFSDM_CR1_JDMAEN(v) FIELD_PREP(DFSDM_CR1_JDMAEN_MASK, v)
+#define DFSDM_CR1_JEXTSEL_MASK GENMASK(12, 8)
+#define DFSDM_CR1_JEXTSEL(v) FIELD_PREP(DFSDM_CR1_JEXTSEL_MASK, v)
+#define DFSDM_CR1_JEXTEN_MASK GENMASK(14, 13)
+#define DFSDM_CR1_JEXTEN(v) FIELD_PREP(DFSDM_CR1_JEXTEN_MASK, v)
+#define DFSDM_CR1_RSWSTART_MASK BIT(17)
+#define DFSDM_CR1_RSWSTART(v) FIELD_PREP(DFSDM_CR1_RSWSTART_MASK, v)
+#define DFSDM_CR1_RCONT_MASK BIT(18)
+#define DFSDM_CR1_RCONT(v) FIELD_PREP(DFSDM_CR1_RCONT_MASK, v)
+#define DFSDM_CR1_RSYNC_MASK BIT(19)
+#define DFSDM_CR1_RSYNC(v) FIELD_PREP(DFSDM_CR1_RSYNC_MASK, v)
+#define DFSDM_CR1_RDMAEN_MASK BIT(21)
+#define DFSDM_CR1_RDMAEN(v) FIELD_PREP(DFSDM_CR1_RDMAEN_MASK, v)
+#define DFSDM_CR1_RCH_MASK GENMASK(26, 24)
+#define DFSDM_CR1_RCH(v) FIELD_PREP(DFSDM_CR1_RCH_MASK, v)
+#define DFSDM_CR1_FAST_MASK BIT(29)
+#define DFSDM_CR1_FAST(v) FIELD_PREP(DFSDM_CR1_FAST_MASK, v)
+#define DFSDM_CR1_AWFSEL_MASK BIT(30)
+#define DFSDM_CR1_AWFSEL(v) FIELD_PREP(DFSDM_CR1_AWFSEL_MASK, v)
+
+/* CR2: Control register 2 */
+#define DFSDM_CR2_IE_MASK GENMASK(6, 0)
+#define DFSDM_CR2_IE(v) FIELD_PREP(DFSDM_CR2_IE_MASK, v)
+#define DFSDM_CR2_JEOCIE_MASK BIT(0)
+#define DFSDM_CR2_JEOCIE(v) FIELD_PREP(DFSDM_CR2_JEOCIE_MASK, v)
+#define DFSDM_CR2_REOCIE_MASK BIT(1)
+#define DFSDM_CR2_REOCIE(v) FIELD_PREP(DFSDM_CR2_REOCIE_MASK, v)
+#define DFSDM_CR2_JOVRIE_MASK BIT(2)
+#define DFSDM_CR2_JOVRIE(v) FIELD_PREP(DFSDM_CR2_JOVRIE_MASK, v)
+#define DFSDM_CR2_ROVRIE_MASK BIT(3)
+#define DFSDM_CR2_ROVRIE(v) FIELD_PREP(DFSDM_CR2_ROVRIE_MASK, v)
+#define DFSDM_CR2_AWDIE_MASK BIT(4)
+#define DFSDM_CR2_AWDIE(v) FIELD_PREP(DFSDM_CR2_AWDIE_MASK, v)
+#define DFSDM_CR2_SCDIE_MASK BIT(5)
+#define DFSDM_CR2_SCDIE(v) FIELD_PREP(DFSDM_CR2_SCDIE_MASK, v)
+#define DFSDM_CR2_CKABIE_MASK BIT(6)
+#define DFSDM_CR2_CKABIE(v) FIELD_PREP(DFSDM_CR2_CKABIE_MASK, v)
+#define DFSDM_CR2_EXCH_MASK GENMASK(15, 8)
+#define DFSDM_CR2_EXCH(v) FIELD_PREP(DFSDM_CR2_EXCH_MASK, v)
+#define DFSDM_CR2_AWDCH_MASK GENMASK(23, 16)
+#define DFSDM_CR2_AWDCH(v) FIELD_PREP(DFSDM_CR2_AWDCH_MASK, v)
+
+/* ISR: Interrupt status register */
+#define DFSDM_ISR_JEOCF_MASK BIT(0)
+#define DFSDM_ISR_JEOCF(v) FIELD_PREP(DFSDM_ISR_JEOCF_MASK, v)
+#define DFSDM_ISR_REOCF_MASK BIT(1)
+#define DFSDM_ISR_REOCF(v) FIELD_PREP(DFSDM_ISR_REOCF_MASK, v)
+#define DFSDM_ISR_JOVRF_MASK BIT(2)
+#define DFSDM_ISR_JOVRF(v) FIELD_PREP(DFSDM_ISR_JOVRF_MASK, v)
+#define DFSDM_ISR_ROVRF_MASK BIT(3)
+#define DFSDM_ISR_ROVRF(v) FIELD_PREP(DFSDM_ISR_ROVRF_MASK, v)
+#define DFSDM_ISR_AWDF_MASK BIT(4)
+#define DFSDM_ISR_AWDF(v) FIELD_PREP(DFSDM_ISR_AWDF_MASK, v)
+#define DFSDM_ISR_JCIP_MASK BIT(13)
+#define DFSDM_ISR_JCIP(v) FIELD_PREP(DFSDM_ISR_JCIP_MASK, v)
+#define DFSDM_ISR_RCIP_MASK BIT(14)
+#define DFSDM_ISR_RCIP(v) FIELD_PREP(DFSDM_ISR_RCIP_MASK, v)
+#define DFSDM_ISR_CKABF_MASK GENMASK(23, 16)
+#define DFSDM_ISR_CKABF(v) FIELD_PREP(DFSDM_ISR_CKABF_MASK, v)
+#define DFSDM_ISR_SCDF_MASK GENMASK(31, 24)
+#define DFSDM_ISR_SCDF(v) FIELD_PREP(DFSDM_ISR_SCDF_MASK, v)
+
+/* ICR: Interrupt flag clear register */
+#define DFSDM_ICR_CLRJOVRF_MASK BIT(2)
+#define DFSDM_ICR_CLRJOVRF(v) FIELD_PREP(DFSDM_ICR_CLRJOVRF_MASK, v)
+#define DFSDM_ICR_CLRROVRF_MASK BIT(3)
+#define DFSDM_ICR_CLRROVRF(v) FIELD_PREP(DFSDM_ICR_CLRROVRF_MASK, v)
+#define DFSDM_ICR_CLRCKABF_MASK GENMASK(23, 16)
+#define DFSDM_ICR_CLRCKABF(v) FIELD_PREP(DFSDM_ICR_CLRCKABF_MASK, v)
+#define DFSDM_ICR_CLRCKABF_CH_MASK(y) BIT(16 + (y))
+#define DFSDM_ICR_CLRCKABF_CH(v, y) \
+ (((v) << (16 + (y))) & DFSDM_ICR_CLRCKABF_CH_MASK(y))
+#define DFSDM_ICR_CLRSCDF_MASK GENMASK(31, 24)
+#define DFSDM_ICR_CLRSCDF(v) FIELD_PREP(DFSDM_ICR_CLRSCDF_MASK, v)
+#define DFSDM_ICR_CLRSCDF_CH_MASK(y) BIT(24 + (y))
+#define DFSDM_ICR_CLRSCDF_CH(v, y) \
+ (((v) << (24 + (y))) & DFSDM_ICR_CLRSCDF_CH_MASK(y))
+
+/* FCR: Filter control register */
+#define DFSDM_FCR_IOSR_MASK GENMASK(7, 0)
+#define DFSDM_FCR_IOSR(v) FIELD_PREP(DFSDM_FCR_IOSR_MASK, v)
+#define DFSDM_FCR_FOSR_MASK GENMASK(25, 16)
+#define DFSDM_FCR_FOSR(v) FIELD_PREP(DFSDM_FCR_FOSR_MASK, v)
+#define DFSDM_FCR_FORD_MASK GENMASK(31, 29)
+#define DFSDM_FCR_FORD(v) FIELD_PREP(DFSDM_FCR_FORD_MASK, v)
+
+/* RDATAR: Filter data register for regular channel */
+#define DFSDM_DATAR_CH_MASK GENMASK(2, 0)
+#define DFSDM_DATAR_DATA_OFFSET 8
+#define DFSDM_DATAR_DATA_MASK GENMASK(31, DFSDM_DATAR_DATA_OFFSET)
+
+/* AWLTR: Filter analog watchdog low threshold register */
+#define DFSDM_AWLTR_BKAWL_MASK GENMASK(3, 0)
+#define DFSDM_AWLTR_BKAWL(v) FIELD_PREP(DFSDM_AWLTR_BKAWL_MASK, v)
+#define DFSDM_AWLTR_AWLT_MASK GENMASK(31, 8)
+#define DFSDM_AWLTR_AWLT(v) FIELD_PREP(DFSDM_AWLTR_AWLT_MASK, v)
+
+/* AWHTR: Filter analog watchdog high threshold register */
+#define DFSDM_AWHTR_BKAWH_MASK GENMASK(3, 0)
+#define DFSDM_AWHTR_BKAWH(v) FIELD_PREP(DFSDM_AWHTR_BKAWH_MASK, v)
+#define DFSDM_AWHTR_AWHT_MASK GENMASK(31, 8)
+#define DFSDM_AWHTR_AWHT(v) FIELD_PREP(DFSDM_AWHTR_AWHT_MASK, v)
+
+/* AWSR: Filter analog watchdog status register */
+#define DFSDM_AWSR_AWLTF_MASK GENMASK(7, 0)
+#define DFSDM_AWSR_AWLTF(v) FIELD_PREP(DFSDM_AWSR_AWLTF_MASK, v)
+#define DFSDM_AWSR_AWHTF_MASK GENMASK(15, 8)
+#define DFSDM_AWSR_AWHTF(v) FIELD_PREP(DFSDM_AWSR_AWHTF_MASK, v)
+
+/* AWCFR: Filter analog watchdog flag clear register */
+#define DFSDM_AWCFR_AWLTF_MASK GENMASK(7, 0)
+#define DFSDM_AWCFR_AWLTF(v) FIELD_PREP(DFSDM_AWCFR_AWLTF_MASK, v)
+#define DFSDM_AWCFR_AWHTF_MASK GENMASK(15, 8)
+#define DFSDM_AWCFR_AWHTF(v) FIELD_PREP(DFSDM_AWCFR_AWHTF_MASK, v)
+
+/* DFSDM filter order */
+enum stm32_dfsdm_sinc_order {
+ DFSDM_FASTSINC_ORDER, /* FastSinc filter type */
+ DFSDM_SINC1_ORDER, /* Sinc 1 filter type */
+ DFSDM_SINC2_ORDER, /* Sinc 2 filter type */
+ DFSDM_SINC3_ORDER, /* Sinc 3 filter type */
+ DFSDM_SINC4_ORDER, /* Sinc 4 filter type (N.A. for watchdog) */
+ DFSDM_SINC5_ORDER, /* Sinc 5 filter type (N.A. for watchdog) */
+ DFSDM_NB_SINC_ORDER,
+};
+
+/**
+ * struct stm32_dfsdm_filter - structure relative to stm32 DFSDM filter
+ * @iosr: integrator oversampling
+ * @fosr: filter oversampling
+ * @ford: filter order
+ * @res: output sample resolution
+ * @sync_mode: filter synchronized with filter 0
+ * @fast: filter fast mode
+ */
+struct stm32_dfsdm_filter {
+ unsigned int iosr;
+ unsigned int fosr;
+ enum stm32_dfsdm_sinc_order ford;
+ u64 res;
+ unsigned int sync_mode;
+ unsigned int fast;
+};
+
+/**
+ * struct stm32_dfsdm_channel - structure relative to stm32 DFSDM channel
+ * @id: id of the channel
+ * @type: interface type linked to stm32_dfsdm_chan_type
+ * @src: interface type linked to stm32_dfsdm_chan_src
+ * @alt_si: alternative serial input interface
+ */
+struct stm32_dfsdm_channel {
+ unsigned int id;
+ unsigned int type;
+ unsigned int src;
+ unsigned int alt_si;
+};
+
+/**
+ * struct stm32_dfsdm - stm32 DFSDM driver common data (for all instances)
+ * @base: control registers base cpu addr
+ * @phys_base: DFSDM IP register physical address
+ * @regmap: regmap for register read/write
+ * @fl_list: filter resources list
+ * @num_fls: number of filter resources available
+ * @ch_list: channel resources list
+ * @num_chs: number of channel resources available
+ * @spi_master_freq: SPI clock out frequency
+ */
+struct stm32_dfsdm {
+ void __iomem *base;
+ phys_addr_t phys_base;
+ struct regmap *regmap;
+ struct stm32_dfsdm_filter *fl_list;
+ unsigned int num_fls;
+ struct stm32_dfsdm_channel *ch_list;
+ unsigned int num_chs;
+ unsigned int spi_master_freq;
+};
+
+/* DFSDM channel serial spi clock source */
+enum stm32_dfsdm_spi_clk_src {
+ DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL,
+ DFSDM_CHANNEL_SPI_CLOCK_INTERNAL,
+ DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING,
+ DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING
+};
+
+int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm);
+int stm32_dfsdm_stop_dfsdm(struct stm32_dfsdm *dfsdm);
+
+#endif
diff --git a/drivers/iio/buffer/Kconfig b/drivers/iio/buffer/Kconfig
index 4ffd3db..338774c 100644
--- a/drivers/iio/buffer/Kconfig
+++ b/drivers/iio/buffer/Kconfig
@@ -29,6 +29,16 @@ config IIO_BUFFER_DMAENGINE
Should be selected by drivers that want to use this functionality.
+config IIO_BUFFER_HW_CONSUMER
+ tristate "Industrial I/O HW buffering"
+ help
+ Provides a way of bonding an IIO device to another device that is
+ directly connected to it in hardware. In this case, buffers for data
+ transfers are handled by hardware.
+
+ Should be selected by drivers that want to use the generic HW consumer
+ interface.
+
config IIO_KFIFO_BUF
tristate "Industrial I/O buffering based on kfifo"
help
diff --git a/drivers/iio/buffer/Makefile b/drivers/iio/buffer/Makefile
index 95f9f41..1403eb2 100644
--- a/drivers/iio/buffer/Makefile
+++ b/drivers/iio/buffer/Makefile
@@ -7,5 +7,6 @@
obj-$(CONFIG_IIO_BUFFER_CB) += industrialio-buffer-cb.o
obj-$(CONFIG_IIO_BUFFER_DMA) += industrialio-buffer-dma.o
obj-$(CONFIG_IIO_BUFFER_DMAENGINE) += industrialio-buffer-dmaengine.o
+obj-$(CONFIG_IIO_BUFFER_HW_CONSUMER) += industrialio-hw-consumer.o
obj-$(CONFIG_IIO_TRIGGERED_BUFFER) += industrialio-triggered-buffer.o
obj-$(CONFIG_IIO_KFIFO_BUF) += kfifo_buf.o
diff --git a/drivers/iio/buffer/industrialio-buffer-cb.c b/drivers/iio/buffer/industrialio-buffer-cb.c
index 4847534..ea63c83 100644
--- a/drivers/iio/buffer/industrialio-buffer-cb.c
+++ b/drivers/iio/buffer/industrialio-buffer-cb.c
@@ -104,6 +104,17 @@ error_free_cb_buff:
}
EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);
+int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buff,
+ size_t watermark)
+{
+ if (!watermark)
+ return -EINVAL;
+ cb_buff->buffer.watermark = watermark;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iio_channel_cb_set_buffer_watermark);
+
int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
{
return iio_update_buffers(cb_buff->indio_dev, &cb_buff->buffer,
diff --git a/drivers/iio/buffer/industrialio-hw-consumer.c b/drivers/iio/buffer/industrialio-hw-consumer.c
new file mode 100644
index 0000000..9516569
--- /dev/null
+++ b/drivers/iio/buffer/industrialio-hw-consumer.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/hw-consumer.h>
+#include <linux/iio/buffer_impl.h>
+
+/**
+ * struct iio_hw_consumer - IIO hw consumer block
+ * @buffers: hardware buffers list head.
+ * @channels: IIO provider channels.
+ */
+struct iio_hw_consumer {
+ struct list_head buffers;
+ struct iio_channel *channels;
+};
+
+struct hw_consumer_buffer {
+ struct list_head head;
+ struct iio_dev *indio_dev;
+ struct iio_buffer buffer;
+ long scan_mask[];
+};
+
+static struct hw_consumer_buffer *iio_buffer_to_hw_consumer_buffer(
+ struct iio_buffer *buffer)
+{
+ return container_of(buffer, struct hw_consumer_buffer, buffer);
+}
+
+static void iio_hw_buf_release(struct iio_buffer *buffer)
+{
+ struct hw_consumer_buffer *hw_buf =
+ iio_buffer_to_hw_consumer_buffer(buffer);
+ kfree(hw_buf);
+}
+
+static const struct iio_buffer_access_funcs iio_hw_buf_access = {
+ .release = &iio_hw_buf_release,
+ .modes = INDIO_BUFFER_HARDWARE,
+};
+
+static struct hw_consumer_buffer *iio_hw_consumer_get_buffer(
+ struct iio_hw_consumer *hwc, struct iio_dev *indio_dev)
+{
+ size_t mask_size = BITS_TO_LONGS(indio_dev->masklength) * sizeof(long);
+ struct hw_consumer_buffer *buf;
+
+ list_for_each_entry(buf, &hwc->buffers, head) {
+ if (buf->indio_dev == indio_dev)
+ return buf;
+ }
+
+ buf = kzalloc(sizeof(*buf) + mask_size, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ buf->buffer.access = &iio_hw_buf_access;
+ buf->indio_dev = indio_dev;
+ buf->buffer.scan_mask = buf->scan_mask;
+
+ iio_buffer_init(&buf->buffer);
+ list_add_tail(&buf->head, &hwc->buffers);
+
+ return buf;
+}
+
+/**
+ * iio_hw_consumer_alloc() - Allocate IIO hardware consumer
+ * @dev: Pointer to consumer device.
+ *
+ * Returns a valid iio_hw_consumer on success or an ERR_PTR() on failure.
+ */
+struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev)
+{
+ struct hw_consumer_buffer *buf;
+ struct iio_hw_consumer *hwc;
+ struct iio_channel *chan;
+ int ret;
+
+ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
+ if (!hwc)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&hwc->buffers);
+
+ hwc->channels = iio_channel_get_all(dev);
+ if (IS_ERR(hwc->channels)) {
+ ret = PTR_ERR(hwc->channels);
+ goto err_free_hwc;
+ }
+
+ chan = &hwc->channels[0];
+ while (chan->indio_dev) {
+ buf = iio_hw_consumer_get_buffer(hwc, chan->indio_dev);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_put_buffers;
+ }
+ set_bit(chan->channel->scan_index, buf->buffer.scan_mask);
+ chan++;
+ }
+
+ return hwc;
+
+err_put_buffers:
+ list_for_each_entry(buf, &hwc->buffers, head)
+ iio_buffer_put(&buf->buffer);
+ iio_channel_release_all(hwc->channels);
+err_free_hwc:
+ kfree(hwc);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_alloc);
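+
+/*
+ * Typical consumer flow (sketch): allocate the hw consumer at probe time
+ * with iio_hw_consumer_alloc() or the devm_ variant, then bracket each
+ * hardware-driven conversion with iio_hw_consumer_enable() and
+ * iio_hw_consumer_disable(), as the stm32-dfsdm-adc read_raw() path does.
+ */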
+
+/**
+ * iio_hw_consumer_free() - Free IIO hardware consumer
+ * @hwc: hw consumer to free.
+ */
+void iio_hw_consumer_free(struct iio_hw_consumer *hwc)
+{
+ struct hw_consumer_buffer *buf, *n;
+
+ iio_channel_release_all(hwc->channels);
+ list_for_each_entry_safe(buf, n, &hwc->buffers, head)
+ iio_buffer_put(&buf->buffer);
+ kfree(hwc);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_free);
+
+static void devm_iio_hw_consumer_release(struct device *dev, void *res)
+{
+ iio_hw_consumer_free(*(struct iio_hw_consumer **)res);
+}
+
+static int devm_iio_hw_consumer_match(struct device *dev, void *res, void *data)
+{
+ struct iio_hw_consumer **r = res;
+
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+ return *r == data;
+}
+
+/**
+ * devm_iio_hw_consumer_alloc - Resource-managed iio_hw_consumer_alloc()
+ * @dev: Pointer to consumer device.
+ *
+ * Managed iio_hw_consumer_alloc. iio_hw_consumer allocated with this function
+ * is automatically freed on driver detach.
+ *
+ * If an iio_hw_consumer allocated with this function needs to be freed
+ * separately, devm_iio_hw_consumer_free() must be used.
+ *
+ * Returns a pointer to the allocated iio_hw_consumer on success, NULL on failure.
+ */
+struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev)
+{
+ struct iio_hw_consumer **ptr, *iio_hwc;
+
+ ptr = devres_alloc(devm_iio_hw_consumer_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ iio_hwc = iio_hw_consumer_alloc(dev);
+ if (IS_ERR(iio_hwc)) {
+ devres_free(ptr);
+ } else {
+ *ptr = iio_hwc;
+ devres_add(dev, ptr);
+ }
+
+ return iio_hwc;
+}
+EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_alloc);
+
+/**
+ * devm_iio_hw_consumer_free - Resource-managed iio_hw_consumer_free()
+ * @dev: Pointer to consumer device.
+ * @hwc: iio_hw_consumer to free.
+ *
+ * Free iio_hw_consumer allocated with devm_iio_hw_consumer_alloc().
+ */
+void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc)
+{
+ int rc;
+
+ rc = devres_release(dev, devm_iio_hw_consumer_release,
+ devm_iio_hw_consumer_match, hwc);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_free);
+
+/**
+ * iio_hw_consumer_enable() - Enable IIO hardware consumer
+ * @hwc: iio_hw_consumer to enable.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int iio_hw_consumer_enable(struct iio_hw_consumer *hwc)
+{
+ struct hw_consumer_buffer *buf;
+ int ret;
+
+ list_for_each_entry(buf, &hwc->buffers, head) {
+ ret = iio_update_buffers(buf->indio_dev, &buf->buffer, NULL);
+ if (ret)
+ goto err_disable_buffers;
+ }
+
+ return 0;
+
+err_disable_buffers:
+ list_for_each_entry_continue_reverse(buf, &hwc->buffers, head)
+ iio_update_buffers(buf->indio_dev, NULL, &buf->buffer);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_enable);
+
+/**
+ * iio_hw_consumer_disable() - Disable IIO hardware consumer
+ * @hwc: iio_hw_consumer to disable.
+ */
+void iio_hw_consumer_disable(struct iio_hw_consumer *hwc)
+{
+ struct hw_consumer_buffer *buf;
+
+ list_for_each_entry(buf, &hwc->buffers, head)
+ iio_update_buffers(buf->indio_dev, NULL, &buf->buffer);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_disable);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Hardware consumer buffer the IIO framework");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 069defc..ec98790 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -664,9 +664,8 @@ err_unlock:
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
-static int iio_read_channel_attribute(struct iio_channel *chan,
- int *val, int *val2,
- enum iio_chan_info_enum attribute)
+int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
+ enum iio_chan_info_enum attribute)
{
int ret;
@@ -682,6 +681,7 @@ err_unlock:
return ret;
}
+EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
@@ -850,7 +850,8 @@ static int iio_channel_write(struct iio_channel *chan, int val, int val2,
chan->channel, val, val2, info);
}
-int iio_write_channel_raw(struct iio_channel *chan, int val)
+int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
+ enum iio_chan_info_enum attribute)
{
int ret;
@@ -860,12 +861,18 @@ int iio_write_channel_raw(struct iio_channel *chan, int val)
goto err_unlock;
}
- ret = iio_channel_write(chan, val, 0, IIO_CHAN_INFO_RAW);
+ ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
mutex_unlock(&chan->indio_dev->info_exist_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(iio_write_channel_attribute);
+
+int iio_write_channel_raw(struct iio_channel *chan, int val)
+{
+ return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
+}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index cbf1865..5cd7004 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -4,6 +4,7 @@ menuconfig INFINIBAND
depends on NET
depends on INET
depends on m || IPV6 != m
+ depends on !ALPHA
select IRQ_POLL
---help---
Core support for InfiniBand (IB). Make sure to also select
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f698335..6294a700 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -4458,7 +4458,7 @@ out:
return skb->len;
}
-static const struct rdma_nl_cbs cma_cb_table[] = {
+static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = {
[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats},
};
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index a1d687a..66f0268 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -314,7 +314,7 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
}
#endif
-struct ib_device *__ib_device_get_by_index(u32 ifindex);
+struct ib_device *ib_device_get_by_index(u32 ifindex);
/* RDMA device netlink */
void nldev_init(void);
void nldev_exit(void);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 5e1be49..4655206 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -134,7 +134,7 @@ static int ib_device_check_mandatory(struct ib_device *device)
return 0;
}
-struct ib_device *__ib_device_get_by_index(u32 index)
+static struct ib_device *__ib_device_get_by_index(u32 index)
{
struct ib_device *device;
@@ -145,6 +145,22 @@ struct ib_device *__ib_device_get_by_index(u32 index)
return NULL;
}
+/*
+ * The caller is responsible for dropping the reference count by calling put_device().
+ */
+struct ib_device *ib_device_get_by_index(u32 index)
+{
+ struct ib_device *device;
+
+ down_read(&lists_rwsem);
+ device = __ib_device_get_by_index(index);
+ if (device)
+ get_device(&device->dev);
+
+ up_read(&lists_rwsem);
+ return device;
+}
+
static struct ib_device *__ib_device_get_by_name(const char *name)
{
struct ib_device *device;
@@ -1146,7 +1162,7 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);
-static const struct rdma_nl_cbs ibnl_ls_cb_table[] = {
+static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
[RDMA_NL_LS_OP_RESOLVE] = {
.doit = ib_nl_handle_resolve_resp,
.flags = RDMA_NL_ADMIN_PERM,
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index e9e189e..5d676cf 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -80,7 +80,7 @@ const char *__attribute_const__ iwcm_reject_msg(int reason)
}
EXPORT_SYMBOL(iwcm_reject_msg);
-static struct rdma_nl_cbs iwcm_nl_cb_table[] = {
+static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 2fae850..0dcd1aa 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -142,27 +142,34 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = __ib_device_get_by_index(index);
+ device = ib_device_get_by_index(index);
if (!device)
return -EINVAL;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
+ if (!msg) {
+ err = -ENOMEM;
+ goto err;
+ }
nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
0, 0);
err = fill_dev_info(msg, device);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
+ if (err)
+ goto err_free;
nlmsg_end(msg, nlh);
+ put_device(&device->dev);
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+ nlmsg_free(msg);
+err:
+ put_device(&device->dev);
+ return err;
}
static int _nldev_get_dumpit(struct ib_device *device,
@@ -220,31 +227,40 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = __ib_device_get_by_index(index);
+ device = ib_device_get_by_index(index);
if (!device)
return -EINVAL;
port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
- if (!rdma_is_port_valid(device, port))
- return -EINVAL;
+ if (!rdma_is_port_valid(device, port)) {
+ err = -EINVAL;
+ goto err;
+ }
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
+ if (!msg) {
+ err = -ENOMEM;
+ goto err;
+ }
nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
0, 0);
err = fill_port_info(msg, device, port);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
+ if (err)
+ goto err_free;
nlmsg_end(msg, nlh);
+ put_device(&device->dev);
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+ nlmsg_free(msg);
+err:
+ put_device(&device->dev);
+ return err;
}
static int nldev_port_get_dumpit(struct sk_buff *skb,
@@ -265,7 +281,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
return -EINVAL;
ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
- device = __ib_device_get_by_index(ifindex);
+ device = ib_device_get_by_index(ifindex);
if (!device)
return -EINVAL;
@@ -299,11 +315,13 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
nlmsg_end(skb, nlh);
}
-out: cb->args[0] = idx;
+out:
+ put_device(&device->dev);
+ cb->args[0] = idx;
return skb->len;
}
-static const struct rdma_nl_cbs nldev_cb_table[] = {
+static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
[RDMA_NLDEV_CMD_GET] = {
.doit = nldev_get_doit,
.dump = nldev_get_dumpit,
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index a337386..59b2f96 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -386,6 +386,9 @@ int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
if (ret)
return ret;
+ if (!qp->qp_sec)
+ return 0;
+
mutex_lock(&real_qp->qp_sec->mutex);
ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
qp->qp_sec);
@@ -739,8 +742,11 @@ int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
return 0;
- if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
- return -EACCES;
+ if (map->agent.qp->qp_type == IB_QPT_SMI) {
+ if (!map->agent.smp_allowed)
+ return -EACCES;
+ return 0;
+ }
return ib_security_pkey_access(map->agent.device,
map->agent.port_num,
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 16d5571..840b240 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1971,6 +1971,12 @@ static int modify_qp(struct ib_uverbs_file *file,
goto release_qp;
}
+ if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
+ !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) {
+ ret = -EINVAL;
+ goto release_qp;
+ }
+
attr->qp_state = cmd->base.qp_state;
attr->cur_qp_state = cmd->base.cur_qp_state;
attr->path_mtu = cmd->base.path_mtu;
@@ -2068,8 +2074,8 @@ int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
return -EOPNOTSUPP;
if (ucore->inlen > sizeof(cmd)) {
- if (ib_is_udata_cleared(ucore, sizeof(cmd),
- ucore->inlen - sizeof(cmd)))
+ if (!ib_is_udata_cleared(ucore, sizeof(cmd),
+ ucore->inlen - sizeof(cmd)))
return -EOPNOTSUPP;
}
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 3fb8fb6..e36d27e 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1438,7 +1438,8 @@ int ib_close_qp(struct ib_qp *qp)
spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
atomic_dec(&real_qp->usecnt);
- ib_close_shared_qp_security(qp->qp_sec);
+ if (qp->qp_sec)
+ ib_close_shared_qp_security(qp->qp_sec);
kfree(qp);
return 0;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index ea55e95..6f2b261 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -395,6 +395,11 @@ next_cqe:
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
+ if (DRAIN_CQE(cqe)) {
+ WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
+ return 0;
+ }
+
if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
return 0;
@@ -489,7 +494,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
/*
* Special cqe for drain WR completions...
*/
- if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+ if (DRAIN_CQE(hw_cqe)) {
*cookie = CQE_DRAIN_COOKIE(hw_cqe);
*cqe = *hw_cqe;
goto skip_cqe;
@@ -566,10 +571,10 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
ret = -EAGAIN;
goto skip_cqe;
}
- if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
+ if (unlikely(!CQE_STATUS(hw_cqe) &&
+ CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
t4_set_wq_in_error(wq);
- hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
- goto proc_cqe;
+ hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
}
goto proc_cqe;
}
@@ -743,9 +748,6 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
c4iw_invalidate_mr(qhp->rhp,
CQE_WRID_FR_STAG(&cqe));
break;
- case C4IW_DRAIN_OPCODE:
- wc->opcode = IB_WC_SEND;
- break;
default:
pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
CQE_OPCODE(&cqe), CQE_QPID(&cqe));
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 470f97a..65dd372 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -693,8 +693,6 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
return IB_QPS_ERR;
}
-#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
-
static inline u32 c4iw_ib_to_tpt_access(int a)
{
return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 5ee7fe4..d5c92fc 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -790,21 +790,57 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
return 0;
}
-static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+static int ib_to_fw_opcode(int ib_opcode)
+{
+ int opcode;
+
+ switch (ib_opcode) {
+ case IB_WR_SEND_WITH_INV:
+ opcode = FW_RI_SEND_WITH_INV;
+ break;
+ case IB_WR_SEND:
+ opcode = FW_RI_SEND;
+ break;
+ case IB_WR_RDMA_WRITE:
+ opcode = FW_RI_RDMA_WRITE;
+ break;
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_READ_WITH_INV:
+ opcode = FW_RI_READ_REQ;
+ break;
+ case IB_WR_REG_MR:
+ opcode = FW_RI_FAST_REGISTER;
+ break;
+ case IB_WR_LOCAL_INV:
+ opcode = FW_RI_LOCAL_INV;
+ break;
+ default:
+ opcode = -EINVAL;
+ }
+ return opcode;
+}
+
+static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
{
struct t4_cqe cqe = {};
struct c4iw_cq *schp;
unsigned long flag;
struct t4_cq *cq;
+ int opcode;
schp = to_c4iw_cq(qhp->ibqp.send_cq);
cq = &schp->cq;
+ opcode = ib_to_fw_opcode(wr->opcode);
+ if (opcode < 0)
+ return opcode;
+
cqe.u.drain_cookie = wr->wr_id;
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
- CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+ CQE_OPCODE_V(opcode) |
CQE_TYPE_V(1) |
CQE_SWCQE_V(1) |
+ CQE_DRAIN_V(1) |
CQE_QPID_V(qhp->wq.sq.qid));
spin_lock_irqsave(&schp->lock, flag);
@@ -819,6 +855,23 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
schp->ibcq.cq_context);
spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
}
+ return 0;
+}
+
+static int complete_sq_drain_wrs(struct c4iw_qp *qhp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ int ret = 0;
+
+ while (wr) {
+ ret = complete_sq_drain_wr(qhp, wr);
+ if (ret) {
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ return ret;
}
static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
@@ -833,9 +886,10 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
cqe.u.drain_cookie = wr->wr_id;
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
- CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+ CQE_OPCODE_V(FW_RI_SEND) |
CQE_TYPE_V(0) |
CQE_SWCQE_V(1) |
+ CQE_DRAIN_V(1) |
CQE_QPID_V(qhp->wq.sq.qid));
spin_lock_irqsave(&rchp->lock, flag);
@@ -852,6 +906,14 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
}
}
+static void complete_rq_drain_wrs(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+ while (wr) {
+ complete_rq_drain_wr(qhp, wr);
+ wr = wr->next;
+ }
+}
+
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
@@ -868,9 +930,14 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
qhp = to_c4iw_qp(ibqp);
spin_lock_irqsave(&qhp->lock, flag);
- if (t4_wq_in_error(&qhp->wq)) {
+
+ /*
+ * If the qp has been flushed, then just insert a special
+ * drain cqe.
+ */
+ if (qhp->wq.flushed) {
spin_unlock_irqrestore(&qhp->lock, flag);
- complete_sq_drain_wr(qhp, wr);
+ err = complete_sq_drain_wrs(qhp, wr, bad_wr);
return err;
}
num_wrs = t4_sq_avail(&qhp->wq);
@@ -1011,9 +1078,14 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
qhp = to_c4iw_qp(ibqp);
spin_lock_irqsave(&qhp->lock, flag);
- if (t4_wq_in_error(&qhp->wq)) {
+
+ /*
+ * If the qp has been flushed, then just insert a special
+ * drain cqe.
+ */
+ if (qhp->wq.flushed) {
spin_unlock_irqrestore(&qhp->lock, flag);
- complete_rq_drain_wr(qhp, wr);
+ complete_rq_drain_wrs(qhp, wr);
return err;
}
num_wrs = t4_rq_avail(&qhp->wq);
@@ -1285,21 +1357,21 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
spin_unlock_irqrestore(&rchp->lock, flag);
if (schp == rchp) {
- if (t4_clear_cq_armed(&rchp->cq) &&
- (rq_flushed || sq_flushed)) {
+ if ((rq_flushed || sq_flushed) &&
+ t4_clear_cq_armed(&rchp->cq)) {
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq,
rchp->ibcq.cq_context);
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}
} else {
- if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
+ if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq,
rchp->ibcq.cq_context);
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}
- if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
+ if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
spin_lock_irqsave(&schp->comp_handler_lock, flag);
(*schp->ibcq.comp_handler)(&schp->ibcq,
schp->ibcq.cq_context);
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index e9ea942..79e8ee1 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -197,6 +197,11 @@ struct t4_cqe {
#define CQE_SWCQE_G(x) ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M)
#define CQE_SWCQE_V(x) ((x)<<CQE_SWCQE_S)
+#define CQE_DRAIN_S 10
+#define CQE_DRAIN_M 0x1
+#define CQE_DRAIN_G(x) ((((x) >> CQE_DRAIN_S)) & CQE_DRAIN_M)
+#define CQE_DRAIN_V(x) ((x)<<CQE_DRAIN_S)
+
#define CQE_STATUS_S 5
#define CQE_STATUS_M 0x1F
#define CQE_STATUS_G(x) ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M)
@@ -213,6 +218,7 @@ struct t4_cqe {
#define CQE_OPCODE_V(x) ((x)<<CQE_OPCODE_S)
#define SW_CQE(x) (CQE_SWCQE_G(be32_to_cpu((x)->header)))
+#define DRAIN_CQE(x) (CQE_DRAIN_G(be32_to_cpu((x)->header)))
#define CQE_QPID(x) (CQE_QPID_G(be32_to_cpu((x)->header)))
#define CQE_TYPE(x) (CQE_TYPE_G(be32_to_cpu((x)->header)))
#define SQ_TYPE(x) (CQE_TYPE((x)))
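/*
 * Editorial sketch, not part of the patch: how the new DRAIN bit travels
 * through the big-endian CQE header, following the CQE_*_S/M/V/G macro
 * pattern above. Assumes the t4.h definitions in context; the opcode and
 * type fields are omitted for brevity.
 */
static inline __be32 example_drain_header(u32 qpid)
{
	return cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
			   CQE_SWCQE_V(1) |
			   CQE_DRAIN_V(1) |
			   CQE_QPID_V(qpid));
}

static inline int example_is_drain(const struct t4_cqe *cqe)
{
	return DRAIN_CQE(cqe);	/* CQE_DRAIN_G(be32_to_cpu(header)) */
}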
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 7750a9c..1df7da4 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -763,11 +763,11 @@ static int complete_subctxt(struct hfi1_filedata *fd)
}
if (ret) {
- hfi1_rcd_put(fd->uctxt);
- fd->uctxt = NULL;
spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
__clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
+ hfi1_rcd_put(fd->uctxt);
+ fd->uctxt = NULL;
}
return ret;
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 4a9b4d7..8ce9118 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1131,7 +1131,6 @@ struct hfi1_devdata {
u16 pcie_lnkctl;
u16 pcie_devctl2;
u32 pci_msix0;
- u32 pci_lnkctl3;
u32 pci_tph2;
/*
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 09e50fd..8c7e7a6 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -411,15 +411,12 @@ int restore_pci_variables(struct hfi1_devdata *dd)
if (ret)
goto error;
- ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
- dd->pci_lnkctl3);
- if (ret)
- goto error;
-
- ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2);
- if (ret)
- goto error;
-
+ if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
+ ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2,
+ dd->pci_tph2);
+ if (ret)
+ goto error;
+ }
return 0;
error:
@@ -469,15 +466,12 @@ int save_pci_variables(struct hfi1_devdata *dd)
if (ret)
goto error;
- ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
- &dd->pci_lnkctl3);
- if (ret)
- goto error;
-
- ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, &dd->pci_tph2);
- if (ret)
- goto error;
-
+ if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
+ ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2,
+ &dd->pci_tph2);
+ if (ret)
+ goto error;
+ }
return 0;
error:
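/*
 * Editorial sketch, not from the patch: gate config-space access on the
 * capability actually being present, as the TPH save/restore above now
 * does. The register offset below is a hypothetical stand-in for the
 * driver's PCIE_CFG_TPH2.
 */
#include <linux/pci.h>

static int example_save_tph(struct pci_dev *pdev, u32 *saved)
{
	if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_TPH))
		return 0;	/* no TPH capability: nothing to save */

	return pci_read_config_dword(pdev, 0x100 /* hypothetical */, saved);
}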
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index af5f793..7eb5d50 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -302,7 +302,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -346,7 +345,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
newreq = 0;
if (qp->s_cur == qp->s_tail) {
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_tail == READ_ONCE(qp->s_head)) {
clear_ahg(qp);
goto bail;
@@ -900,7 +898,6 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd,
}
/* Ensure s_rdma_ack_cnt changes are committed */
- smp_read_barrier_depends();
if (qp->s_rdma_ack_cnt) {
hfi1_queue_rc_ack(qp, is_fecn);
return;
@@ -1562,7 +1559,6 @@ static void rc_rcv_resp(struct hfi1_packet *packet)
trace_hfi1_ack(qp, psn);
/* Ignore invalid responses. */
- smp_read_barrier_depends(); /* see post_one_send */
if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
goto ack_done;
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 2c7fc6e..13b9947 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -362,7 +362,6 @@ static void ruc_loopback(struct rvt_qp *sqp)
sqp->s_flags |= RVT_S_BUSY;
again:
- smp_read_barrier_depends(); /* see post_one_send() */
if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 31c8f89..61c130d 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -553,7 +553,6 @@ static void sdma_hw_clean_up_task(unsigned long opaque)
static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
{
- smp_read_barrier_depends(); /* see sdma_update_tail() */
return sde->tx_ring[sde->tx_head & sde->sdma_mask];
}
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 991bbee..132b63e 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -79,7 +79,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -119,7 +118,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
RVT_PROCESS_NEXT_SEND_OK))
goto bail;
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_cur == READ_ONCE(qp->s_head)) {
clear_ahg(qp);
goto bail;
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index beb5091..deb1845 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -486,7 +486,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -500,7 +499,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
/* see post_one_send() */
- smp_read_barrier_depends();
if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 313bfb9..4975f3e 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -642,7 +642,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
goto err_free_mr;
mr->max_pages = max_num_sg;
-
err = mlx4_mr_enable(dev->dev, &mr->mmr);
if (err)
goto err_free_pl;
@@ -653,6 +652,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
return &mr->ibmr;
err_free_pl:
+ mr->ibmr.device = pd->device;
mlx4_free_priv_pages(mr);
err_free_mr:
(void) mlx4_mr_free(dev->dev, &mr->mmr);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 013049b..caf490a 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -666,6 +666,19 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
return (-EOPNOTSUPP);
}
+ if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 |
+ MLX4_IB_RX_HASH_DST_IPV4 |
+ MLX4_IB_RX_HASH_SRC_IPV6 |
+ MLX4_IB_RX_HASH_DST_IPV6 |
+ MLX4_IB_RX_HASH_SRC_PORT_TCP |
+ MLX4_IB_RX_HASH_DST_PORT_TCP |
+ MLX4_IB_RX_HASH_SRC_PORT_UDP |
+ MLX4_IB_RX_HASH_DST_PORT_UDP)) {
+ pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
+ ucmd->rx_hash_fields_mask);
+ return (-EOPNOTSUPP);
+ }
+
if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
(ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
rss_ctx->flags = MLX4_RSS_IPV4;
@@ -691,11 +704,11 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
return (-EOPNOTSUPP);
}
- if (rss_ctx->flags & MLX4_RSS_IPV4) {
+ if (rss_ctx->flags & MLX4_RSS_IPV4)
rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
- } else if (rss_ctx->flags & MLX4_RSS_IPV6) {
+ if (rss_ctx->flags & MLX4_RSS_IPV6)
rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
- } else {
+ if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
return (-EOPNOTSUPP);
}
@@ -707,15 +720,14 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
(ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
- if (rss_ctx->flags & MLX4_RSS_IPV4) {
+ if (rss_ctx->flags & MLX4_RSS_IPV4)
rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
- } else if (rss_ctx->flags & MLX4_RSS_IPV6) {
+ if (rss_ctx->flags & MLX4_RSS_IPV6)
rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
- } else {
+ if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
return (-EOPNOTSUPP);
}
-
} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
(ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 470995f..6f6712f 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -47,17 +47,6 @@ int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
return err;
}
-int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
- bool reset, void *out, int out_size)
-{
- u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { };
-
- MLX5_SET(query_cong_statistics_in, in, opcode,
- MLX5_CMD_OP_QUERY_CONG_STATISTICS);
- MLX5_SET(query_cong_statistics_in, in, clear, reset);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
-}
-
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
void *out, int out_size)
{
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index af4c245..78ffded 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -37,8 +37,6 @@
#include <linux/mlx5/driver.h>
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
-int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
- bool reset, void *out, int out_size);
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
void *out, int out_size);
int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 543d0a4..262c1aa 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1324,7 +1324,8 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
return err;
if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
- !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
+ (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
+ !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
return err;
mutex_lock(&dev->lb_mutex);
@@ -1342,7 +1343,8 @@ static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
- !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
+ (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
+ !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
return;
mutex_lock(&dev->lb_mutex);
@@ -1463,6 +1465,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
}
INIT_LIST_HEAD(&context->vma_private_list);
+ mutex_init(&context->vma_private_list_mutex);
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
@@ -1624,7 +1627,9 @@ static void mlx5_ib_vma_close(struct vm_area_struct *area)
* mlx5_ib_disassociate_ucontext().
*/
mlx5_ib_vma_priv_data->vma = NULL;
+ mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
list_del(&mlx5_ib_vma_priv_data->list);
+ mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
kfree(mlx5_ib_vma_priv_data);
}
@@ -1644,10 +1649,13 @@ static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
return -ENOMEM;
vma_prv->vma = vma;
+ vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex;
vma->vm_private_data = vma_prv;
vma->vm_ops = &mlx5_ib_vm_ops;
+ mutex_lock(&ctx->vma_private_list_mutex);
list_add(&vma_prv->list, vma_head);
+ mutex_unlock(&ctx->vma_private_list_mutex);
return 0;
}
@@ -1690,6 +1698,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
* mlx5_ib_vma_close.
*/
down_write(&owning_mm->mmap_sem);
+ mutex_lock(&context->vma_private_list_mutex);
list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
list) {
vma = vma_private->vma;
@@ -1704,6 +1713,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
list_del(&vma_private->list);
kfree(vma_private);
}
+ mutex_unlock(&context->vma_private_list_mutex);
up_write(&owning_mm->mmap_sem);
mmput(owning_mm);
put_task_struct(owning_process);
@@ -3737,34 +3747,6 @@ free:
return ret;
}
-static int mlx5_ib_query_cong_counters(struct mlx5_ib_dev *dev,
- struct mlx5_ib_port *port,
- struct rdma_hw_stats *stats)
-{
- int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
- void *out;
- int ret, i;
- int offset = port->cnts.num_q_counters;
-
- out = kvzalloc(outlen, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
-
- ret = mlx5_cmd_query_cong_counter(dev->mdev, false, out, outlen);
- if (ret)
- goto free;
-
- for (i = 0; i < port->cnts.num_cong_counters; i++) {
- stats->value[i + offset] =
- be64_to_cpup((__be64 *)(out +
- port->cnts.offsets[i + offset]));
- }
-
-free:
- kvfree(out);
- return ret;
-}
-
static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
u8 port_num, int index)
@@ -3782,7 +3764,12 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
num_counters = port->cnts.num_q_counters;
if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
- ret = mlx5_ib_query_cong_counters(dev, port, stats);
+ ret = mlx5_lag_query_cong_counters(dev->mdev,
+ stats->value +
+ port->cnts.num_q_counters,
+ port->cnts.num_cong_counters,
+ port->cnts.offsets +
+ port->cnts.num_q_counters);
if (ret)
return ret;
num_counters += port->cnts.num_cong_counters;
@@ -4173,7 +4160,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
goto err_cnt;
dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
- if (!dev->mdev->priv.uar)
+ if (IS_ERR(dev->mdev->priv.uar))
goto err_cong;
err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
@@ -4202,7 +4189,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
}
if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
- MLX5_CAP_GEN(mdev, disable_local_lb))
+ (MLX5_CAP_GEN(mdev, disable_local_lb_uc) ||
+ MLX5_CAP_GEN(mdev, disable_local_lb_mc)))
mutex_init(&dev->lb_mutex);
dev->ib_active = true;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 6dd8cac..2c5f353 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -115,6 +115,8 @@ enum {
struct mlx5_ib_vma_private_data {
struct list_head list;
struct vm_area_struct *vma;
+ /* protect vma_private_list add/del */
+ struct mutex *vma_private_list_mutex;
};
struct mlx5_ib_ucontext {
@@ -129,6 +131,8 @@ struct mlx5_ib_ucontext {
/* Transport Domain number */
u32 tdn;
struct list_head vma_private_list;
+ /* protect vma_private_list add/del */
+ struct mutex vma_private_list_mutex;
unsigned long upd_xlt_page;
/* protect ODP/KSM */
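/*
 * Editorial sketch, not from the patch: the locking rule the two new
 * mutexes encode -- every add/del on vma_private_list happens under
 * vma_private_list_mutex, so the vma close and disassociate paths cannot
 * race on the list. Names below are illustrative.
 */
#include <linux/list.h>
#include <linux/mutex.h>

struct example_ctx {
	struct list_head objs;
	struct mutex objs_mutex;	/* protects objs add/del */
};

static void example_add(struct example_ctx *ctx, struct list_head *node)
{
	mutex_lock(&ctx->objs_mutex);
	list_add(node, &ctx->objs);
	mutex_unlock(&ctx->objs_mutex);
}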
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index ee0ee1f..d109fe8 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1637,6 +1637,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
MLX5_SET(mkc, mkc, umr_en, 1);
+ mr->ibmr.device = pd->device;
err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
if (err)
goto err_destroy_psv;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 31ad2885..cffe596 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4362,12 +4362,11 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
memset(ah_attr, 0, sizeof(*ah_attr));
- ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
- rdma_ah_set_port_num(ah_attr, path->port);
- if (rdma_ah_get_port_num(ah_attr) == 0 ||
- rdma_ah_get_port_num(ah_attr) > MLX5_CAP_GEN(dev, num_ports))
+ if (!path->port || path->port > MLX5_CAP_GEN(dev, num_ports))
return;
+ ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
+
rdma_ah_set_port_num(ah_attr, path->port);
rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf);
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 8f5754f..1a785c3 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -246,7 +246,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -293,7 +292,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
newreq = 0;
if (qp->s_cur == qp->s_tail) {
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_tail == READ_ONCE(qp->s_head))
goto bail;
/*
@@ -1340,7 +1338,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
goto ack_done;
/* Ignore invalid responses. */
- smp_read_barrier_depends(); /* see post_one_send */
if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
goto ack_done;
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 9a37e84..4662cc7 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -367,7 +367,6 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
sqp->s_flags |= RVT_S_BUSY;
again:
- smp_read_barrier_depends(); /* see post_one_send() */
if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index bddcc37..70c58b8 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -60,7 +60,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -90,7 +89,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
RVT_PROCESS_NEXT_SEND_OK))
goto bail;
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
/*
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 15962ed..386c3c4 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -252,7 +252,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -266,7 +265,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
}
/* see post_one_send() */
- smp_read_barrier_depends();
if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 63bc2ef..4f7bd3b6 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -94,7 +94,7 @@ struct pvrdma_cq {
u32 cq_handle;
bool is_kernel;
atomic_t refcnt;
- wait_queue_head_t wait;
+ struct completion free;
};
struct pvrdma_id_table {
@@ -175,7 +175,7 @@ struct pvrdma_srq {
u32 srq_handle;
int npages;
refcount_t refcnt;
- wait_queue_head_t wait;
+ struct completion free;
};
struct pvrdma_qp {
@@ -197,7 +197,7 @@ struct pvrdma_qp {
bool is_kernel;
struct mutex mutex; /* QP state mutex. */
atomic_t refcnt;
- wait_queue_head_t wait;
+ struct completion free;
};
struct pvrdma_dev {
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 3562c0c..e529622 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -179,7 +179,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
atomic_set(&cq->refcnt, 1);
- init_waitqueue_head(&cq->wait);
+ init_completion(&cq->free);
spin_lock_init(&cq->cq_lock);
memset(cmd, 0, sizeof(*cmd));
@@ -230,8 +230,9 @@ err_cq:
static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
{
- atomic_dec(&cq->refcnt);
- wait_event(cq->wait, !atomic_read(&cq->refcnt));
+ if (atomic_dec_and_test(&cq->refcnt))
+ complete(&cq->free);
+ wait_for_completion(&cq->free);
if (!cq->is_kernel)
ib_umem_release(cq->umem);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 1f4e187..e926818 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -346,9 +346,8 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
ibqp->event_handler(&e, ibqp->qp_context);
}
if (qp) {
- atomic_dec(&qp->refcnt);
- if (atomic_read(&qp->refcnt) == 0)
- wake_up(&qp->wait);
+ if (atomic_dec_and_test(&qp->refcnt))
+ complete(&qp->free);
}
}
@@ -373,9 +372,8 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
ibcq->event_handler(&e, ibcq->cq_context);
}
if (cq) {
- atomic_dec(&cq->refcnt);
- if (atomic_read(&cq->refcnt) == 0)
- wake_up(&cq->wait);
+ if (atomic_dec_and_test(&cq->refcnt))
+ complete(&cq->free);
}
}
@@ -404,7 +402,7 @@ static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
}
if (srq) {
if (refcount_dec_and_test(&srq->refcnt))
- wake_up(&srq->wait);
+ complete(&srq->free);
}
}
@@ -539,9 +537,8 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
if (cq && cq->ibcq.comp_handler)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
if (cq) {
- atomic_dec(&cq->refcnt);
- if (atomic_read(&cq->refcnt))
- wake_up(&cq->wait);
+ if (atomic_dec_and_test(&cq->refcnt))
+ complete(&cq->free);
}
pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 10420a1..4059308 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -246,7 +246,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
spin_lock_init(&qp->rq.lock);
mutex_init(&qp->mutex);
atomic_set(&qp->refcnt, 1);
- init_waitqueue_head(&qp->wait);
+ init_completion(&qp->free);
qp->state = IB_QPS_RESET;
@@ -428,8 +428,16 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
- atomic_dec(&qp->refcnt);
- wait_event(qp->wait, !atomic_read(&qp->refcnt));
+ if (atomic_dec_and_test(&qp->refcnt))
+ complete(&qp->free);
+ wait_for_completion(&qp->free);
+
+ if (!qp->is_kernel) {
+ if (qp->rumem)
+ ib_umem_release(qp->rumem);
+ if (qp->sumem)
+ ib_umem_release(qp->sumem);
+ }
pvrdma_page_dir_cleanup(dev, &qp->pdir);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index 826ccb8..5acebb1 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -149,7 +149,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
spin_lock_init(&srq->lock);
refcount_set(&srq->refcnt, 1);
- init_waitqueue_head(&srq->wait);
+ init_completion(&srq->free);
dev_dbg(&dev->pdev->dev,
"create shared receive queue from user space\n");
@@ -236,8 +236,9 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
dev->srq_tbl[srq->srq_handle] = NULL;
spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
- refcount_dec(&srq->refcnt);
- wait_event(srq->wait, !refcount_read(&srq->refcnt));
+ if (refcount_dec_and_test(&srq->refcnt))
+ complete(&srq->free);
+ wait_for_completion(&srq->free);
/* There is no support for kernel clients, so this is safe. */
ib_umem_release(srq->umem);
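/*
 * Editorial sketch, not part of the patch: the teardown pattern the
 * pvrdma objects move to -- whoever drops the last reference signals the
 * completion, and the destroyer blocks on it instead of polling a wait
 * queue.
 */
#include <linux/atomic.h>
#include <linux/completion.h>

struct example_obj {
	atomic_t refcnt;
	struct completion free;
};

static void example_put(struct example_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcnt))
		complete(&obj->free);
}

static void example_destroy(struct example_obj *obj)
{
	example_put(obj);		 /* drop the initial reference */
	wait_for_completion(&obj->free); /* wait out remaining holders */
	/* resources may be freed safely past this point */
}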
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 9177df6..eae84c2 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1684,7 +1684,6 @@ static inline int rvt_qp_is_avail(
/* non-reserved operations */
if (likely(qp->s_avail))
return 0;
- smp_read_barrier_depends(); /* see rc.c */
slast = READ_ONCE(qp->s_last);
if (qp->s_head >= slast)
avail = qp->s_size - (qp->s_head - slast);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 87f4bd9..71ea9e2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1145,6 +1145,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
noio_flag = memalloc_noio_save();
p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
if (!p->tx_ring) {
+ memalloc_noio_restore(noio_flag);
ret = -ENOMEM;
goto err_tx;
}
@@ -1455,8 +1456,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int e = skb_queue_empty(&priv->cm.skb_queue);
- if (skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+ skb_dst_update_pmtu(skb, mtu);
skb_queue_tail(&priv->cm.skb_queue, skb);
if (e)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 3b96cda..e6151a2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1236,13 +1236,10 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
ipoib_ib_dev_down(dev);
if (level == IPOIB_FLUSH_HEAVY) {
- rtnl_lock();
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
ipoib_ib_dev_stop(dev);
- result = ipoib_ib_dev_open(dev);
- rtnl_unlock();
- if (result)
+ if (ipoib_ib_dev_open(dev))
return;
if (netif_queue_stopped(dev))
@@ -1282,7 +1279,9 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_heavy);
+ rtnl_lock();
__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
+ rtnl_unlock();
}
void ipoib_ib_dev_cleanup(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 12b7f91..8880351d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -902,8 +902,8 @@ static int path_rec_start(struct net_device *dev,
return 0;
}
-static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
- struct net_device *dev)
+static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
+ struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rdma_netdev *rn = netdev_priv(dev);
@@ -917,7 +917,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
spin_unlock_irqrestore(&priv->lock, flags);
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
- return;
+ return NULL;
+ }
+
+ /* To avoid a race condition, make sure that the
+ * neigh is added only once.
+ */
+ if (unlikely(!list_empty(&neigh->list))) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return neigh;
}
path = __path_find(dev, daddr + 4);
@@ -956,7 +964,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
path->ah->last_send = rn->send(dev, skb, path->ah->ah,
IPOIB_QPN(daddr));
ipoib_neigh_put(neigh);
- return;
+ return NULL;
}
} else {
neigh->ah = NULL;
@@ -973,7 +981,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_neigh_put(neigh);
- return;
+ return NULL;
err_path:
ipoib_neigh_free(neigh);
@@ -983,6 +991,8 @@ err_drop:
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_neigh_put(neigh);
+
+ return NULL;
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
@@ -1091,8 +1101,9 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
case htons(ETH_P_TIPC):
neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (unlikely(!neigh)) {
- neigh_add_path(skb, phdr->hwaddr, dev);
- return NETDEV_TX_OK;
+ neigh = neigh_add_path(skb, phdr->hwaddr, dev);
+ if (likely(!neigh))
+ return NETDEV_TX_OK;
}
break;
case htons(ETH_P_ARP):
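/*
 * Editorial sketch, not part of the patch: the list_empty() guard used by
 * neigh_add_path() above -- an initialized but unlinked list_head is
 * empty, so a non-empty node means a racing path already linked it. The
 * caller is assumed to hold the lock protecting @head.
 */
#include <linux/list.h>
#include <linux/types.h>

static bool example_link_once(struct list_head *node, struct list_head *head)
{
	if (!list_empty(node))
		return false;	/* already linked by a racing path */
	list_add_tail(node, head);
	return true;
}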
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 93e149e..9b3f47a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -816,7 +816,10 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
spin_lock_irqsave(&priv->lock, flags);
if (!neigh) {
neigh = ipoib_neigh_alloc(daddr, dev);
- if (neigh) {
+ /* Make sure that the neigh is added only
+ * once to the mcast list.
+ */
+ if (neigh && list_empty(&neigh->list)) {
kref_get(&mcast->ah->ref);
neigh->ah = mcast->ah;
list_add_tail(&neigh->list, &mcast->neigh_list);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 720dfb3..1b02283 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -741,6 +741,7 @@ isert_connect_error(struct rdma_cm_id *cma_id)
{
struct isert_conn *isert_conn = cma_id->qp->qp_context;
+ ib_drain_qp(isert_conn->qp);
list_del_init(&isert_conn->node);
isert_conn->cm_id = NULL;
isert_put_conn(isert_conn);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 8a1bd35..bfa576a 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1013,8 +1013,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
return -ENOMEM;
attr->qp_state = IB_QPS_INIT;
- attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE;
+ attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
attr->port_num = ch->sport->port;
attr->pkey_index = 0;
@@ -2078,7 +2077,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
goto destroy_ib;
}
- guid = (__be16 *)&param->primary_path->sgid.global.interface_id;
+ guid = (__be16 *)&param->primary_path->dgid.global.interface_id;
snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x",
be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 3d8ff09..c868a87 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -163,7 +163,7 @@ static unsigned int get_time_pit(void)
#define GET_TIME(x) do { x = (unsigned int)rdtsc(); } while (0)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "TSC"
-#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE)
+#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_RISCV) || defined(CONFIG_TILE)
#define GET_TIME(x) do { x = get_cycles(); } while (0)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "get_cycles"
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index d86e595..d88d3e0 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -229,6 +229,7 @@ static const struct xpad_device {
{ 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -476,6 +477,22 @@ static const u8 xboxone_hori_init[] = {
};
/*
+ * This packet is required for some of the PDP pads to start
+ * sending input reports. One of those pads is (0x0e6f:0x02ab).
+ */
+static const u8 xboxone_pdp_init1[] = {
+ 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
+};
+
+/*
+ * This packet is required for some of the PDP pads to start
+ * sending input reports. One of those pads is (0x0e6f:0x02ab).
+ */
+static const u8 xboxone_pdp_init2[] = {
+ 0x06, 0x20, 0x00, 0x02, 0x01, 0x00
+};
+
+/*
* A specific rumble packet is required for some PowerA pads to start
* sending input reports. One of those pads is (0x24c6:0x543a).
*/
@@ -505,6 +522,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
+ XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1),
+ XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index ae47312..3d51175 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1651,7 +1651,7 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
return union_desc;
dev_err(&intf->dev,
- "Union descriptor to short (%d vs %zd\n)",
+ "Union descriptor too short (%d vs %zd)\n",
union_desc->bLength, sizeof(*union_desc));
return NULL;
}
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 6c51d40..c37aea9 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -178,12 +178,14 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
twl4030_vibra_suspend, twl4030_vibra_resume);
static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
- struct device_node *node)
+ struct device_node *parent)
{
+ struct device_node *node;
+
if (pdata && pdata->coexist)
return true;
- node = of_find_node_by_name(node, "codec");
+ node = of_get_child_by_name(parent, "codec");
if (node) {
of_node_put(node);
return true;
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 5690eb7..15e0d35 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -248,8 +248,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
int vddvibr_uV = 0;
int error;
- of_node_get(twl6040_core_dev->of_node);
- twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
+ twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node,
"vibra");
if (!twl6040_core_node) {
dev_err(&pdev->dev, "parent of node is missing?\n");
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 6bf56bb..d91f3b1 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -326,8 +326,6 @@ static int xenkbd_probe(struct xenbus_device *dev,
0, width, 0, 0);
input_set_abs_params(mtouch, ABS_MT_POSITION_Y,
0, height, 0, 0);
- input_set_abs_params(mtouch, ABS_MT_PRESSURE,
- 0, 255, 0, 0);
ret = input_mt_init_slots(mtouch, num_cont, INPUT_MT_DIRECT);
if (ret) {
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 579b899..dbe57da 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1250,29 +1250,32 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
case SS4_PACKET_ID_MULTI:
if (priv->flags & ALPS_BUTTONPAD) {
if (IS_SS4PLUS_DEV(priv->dev_id)) {
- f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
- f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+ f->mt[2].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
+ f->mt[3].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+ no_data_x = SS4_PLUS_MFPACKET_NO_AX_BL;
} else {
f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
+ no_data_x = SS4_MFPACKET_NO_AX_BL;
}
+ no_data_y = SS4_MFPACKET_NO_AY_BL;
f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
- no_data_x = SS4_MFPACKET_NO_AX_BL;
- no_data_y = SS4_MFPACKET_NO_AY_BL;
} else {
if (IS_SS4PLUS_DEV(priv->dev_id)) {
- f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
- f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+ f->mt[2].x = SS4_PLUS_STD_MF_X_V2(p, 0);
+ f->mt[3].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+ no_data_x = SS4_PLUS_MFPACKET_NO_AX;
} else {
- f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
- f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+ f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
+ f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
+ no_data_x = SS4_MFPACKET_NO_AX;
}
+ no_data_y = SS4_MFPACKET_NO_AY;
+
f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
- no_data_x = SS4_MFPACKET_NO_AX;
- no_data_y = SS4_MFPACKET_NO_AY;
}
f->first_mp = 0;
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index c80a7c7..79b6d69 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -141,10 +141,12 @@ enum SS4_PACKET_ID {
#define SS4_TS_Z_V2(_b) (s8)(_b[4] & 0x7F)
-#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */
-#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */
-#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coordinate value */
-#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coordinate value */
+#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */
+#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */
+#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coord value */
+#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coord value */
+#define SS4_PLUS_MFPACKET_NO_AX 4080 /* SS4 PLUS, X */
+#define SS4_PLUS_MFPACKET_NO_AX_BL 4088 /* Buttonless SS4 PLUS, X */
/*
* enum V7_PACKET_ID - defines the packet type for V7
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index b84cd97..a4aaa74 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1613,7 +1613,7 @@ static int elantech_set_properties(struct elantech_data *etd)
case 5:
etd->hw_version = 3;
break;
- case 6 ... 14:
+ case 6 ... 15:
etd->hw_version = 4;
break;
default:
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ee5466a..cd9f61c 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -173,6 +173,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN0046", /* X250 */
"LEN004a", /* W541 */
"LEN200f", /* T450s */
+ "LEN2018", /* T460p */
NULL
};
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 0871010..bbd2922 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -19,6 +19,13 @@
#include "psmouse.h"
#include "trackpoint.h"
+static const char * const trackpoint_variants[] = {
+ [TP_VARIANT_IBM] = "IBM",
+ [TP_VARIANT_ALPS] = "ALPS",
+ [TP_VARIANT_ELAN] = "Elan",
+ [TP_VARIANT_NXP] = "NXP",
+};
+
/*
* Power-on Reset: Resets all trackpoint parameters, including RAM values,
* to defaults.
@@ -26,7 +33,7 @@
*/
static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
{
- unsigned char results[2];
+ u8 results[2];
int tries = 0;
/* Issue POR command, and repeat up to once if 0xFC00 received */
@@ -38,7 +45,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
/* Check for success response -- 0xAA00 */
if (results[0] != 0xAA || results[1] != 0x00)
- return -1;
+ return -ENODEV;
return 0;
}
@@ -46,8 +53,7 @@ static int trackpoint_power_on_reset(struct ps2dev *ps2dev)
/*
* Device IO: read, write and toggle bit
*/
-static int trackpoint_read(struct ps2dev *ps2dev,
- unsigned char loc, unsigned char *results)
+static int trackpoint_read(struct ps2dev *ps2dev, u8 loc, u8 *results)
{
if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
ps2_command(ps2dev, results, MAKE_PS2_CMD(0, 1, loc))) {
@@ -57,8 +63,7 @@ static int trackpoint_read(struct ps2dev *ps2dev,
return 0;
}
-static int trackpoint_write(struct ps2dev *ps2dev,
- unsigned char loc, unsigned char val)
+static int trackpoint_write(struct ps2dev *ps2dev, u8 loc, u8 val)
{
if (ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_COMMAND)) ||
ps2_command(ps2dev, NULL, MAKE_PS2_CMD(0, 0, TP_WRITE_MEM)) ||
@@ -70,8 +75,7 @@ static int trackpoint_write(struct ps2dev *ps2dev,
return 0;
}
-static int trackpoint_toggle_bit(struct ps2dev *ps2dev,
- unsigned char loc, unsigned char mask)
+static int trackpoint_toggle_bit(struct ps2dev *ps2dev, u8 loc, u8 mask)
{
/* Bad things will happen if the loc param isn't in this range */
if (loc < 0x20 || loc >= 0x2F)
@@ -87,11 +91,11 @@ static int trackpoint_toggle_bit(struct ps2dev *ps2dev,
return 0;
}
-static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc,
- unsigned char mask, unsigned char value)
+static int trackpoint_update_bit(struct ps2dev *ps2dev,
+ u8 loc, u8 mask, u8 value)
{
int retval = 0;
- unsigned char data;
+ u8 data;
trackpoint_read(ps2dev, loc, &data);
if (((data & mask) == mask) != !!value)
@@ -105,17 +109,18 @@ static int trackpoint_update_bit(struct ps2dev *ps2dev, unsigned char loc,
*/
struct trackpoint_attr_data {
size_t field_offset;
- unsigned char command;
- unsigned char mask;
- unsigned char inverted;
- unsigned char power_on_default;
+ u8 command;
+ u8 mask;
+ bool inverted;
+ u8 power_on_default;
};
-static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse, void *data, char *buf)
+static ssize_t trackpoint_show_int_attr(struct psmouse *psmouse,
+ void *data, char *buf)
{
struct trackpoint_data *tp = psmouse->private;
struct trackpoint_attr_data *attr = data;
- unsigned char value = *(unsigned char *)((char *)tp + attr->field_offset);
+ u8 value = *(u8 *)((void *)tp + attr->field_offset);
if (attr->inverted)
value = !value;
@@ -128,8 +133,8 @@ static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data,
{
struct trackpoint_data *tp = psmouse->private;
struct trackpoint_attr_data *attr = data;
- unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
- unsigned char value;
+ u8 *field = (void *)tp + attr->field_offset;
+ u8 value;
int err;
err = kstrtou8(buf, 10, &value);
@@ -157,17 +162,14 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data,
{
struct trackpoint_data *tp = psmouse->private;
struct trackpoint_attr_data *attr = data;
- unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
- unsigned int value;
+ bool *field = (void *)tp + attr->field_offset;
+ bool value;
int err;
- err = kstrtouint(buf, 10, &value);
+ err = kstrtobool(buf, &value);
if (err)
return err;
- if (value > 1)
- return -EINVAL;
-
if (attr->inverted)
value = !value;
@@ -193,30 +195,6 @@ PSMOUSE_DEFINE_ATTR(_name, S_IWUSR | S_IRUGO, \
&trackpoint_attr_##_name, \
trackpoint_show_int_attr, trackpoint_set_bit_attr)
-#define TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name) \
-do { \
- struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name; \
- \
- trackpoint_update_bit(&_psmouse->ps2dev, \
- _attr->command, _attr->mask, _tp->_name); \
-} while (0)
-
-#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name) \
-do { \
- if (!_power_on || \
- _tp->_name != trackpoint_attr_##_name.power_on_default) { \
- if (!trackpoint_attr_##_name.mask) \
- trackpoint_write(&_psmouse->ps2dev, \
- trackpoint_attr_##_name.command, \
- _tp->_name); \
- else \
- TRACKPOINT_UPDATE_BIT(_psmouse, _tp, _name); \
- } \
-} while (0)
-
-#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name) \
- (_tp->_name = trackpoint_attr_##_name.power_on_default)
-
TRACKPOINT_INT_ATTR(sensitivity, TP_SENS, TP_DEF_SENS);
TRACKPOINT_INT_ATTR(speed, TP_SPEED, TP_DEF_SPEED);
TRACKPOINT_INT_ATTR(inertia, TP_INERTIA, TP_DEF_INERTIA);
@@ -229,13 +207,33 @@ TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME);
TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV);
TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME);
-TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0,
+TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, false,
TP_DEF_PTSON);
-TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, 0,
+TRACKPOINT_BIT_ATTR(skipback, TP_TOGGLE_SKIPBACK, TP_MASK_SKIPBACK, false,
TP_DEF_SKIPBACK);
-TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, 1,
+TRACKPOINT_BIT_ATTR(ext_dev, TP_TOGGLE_EXT_DEV, TP_MASK_EXT_DEV, true,
TP_DEF_EXT_DEV);
+static bool trackpoint_is_attr_available(struct psmouse *psmouse,
+ struct attribute *attr)
+{
+ struct trackpoint_data *tp = psmouse->private;
+
+ return tp->variant_id == TP_VARIANT_IBM ||
+ attr == &psmouse_attr_sensitivity.dattr.attr ||
+ attr == &psmouse_attr_press_to_select.dattr.attr;
+}
+
+static umode_t trackpoint_is_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct serio *serio = to_serio_port(dev);
+ struct psmouse *psmouse = serio_get_drvdata(serio);
+
+ return trackpoint_is_attr_available(psmouse, attr) ? attr->mode : 0;
+}
+
static struct attribute *trackpoint_attrs[] = {
&psmouse_attr_sensitivity.dattr.attr,
&psmouse_attr_speed.dattr.attr,
@@ -255,24 +253,56 @@ static struct attribute *trackpoint_attrs[] = {
};
static struct attribute_group trackpoint_attr_group = {
- .attrs = trackpoint_attrs,
+ .is_visible = trackpoint_is_attr_visible,
+ .attrs = trackpoint_attrs,
};
-static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *firmware_id)
-{
- unsigned char param[2] = { 0 };
+#define TRACKPOINT_UPDATE(_power_on, _psmouse, _tp, _name) \
+do { \
+ struct trackpoint_attr_data *_attr = &trackpoint_attr_##_name; \
+ \
+ if ((!_power_on || _tp->_name != _attr->power_on_default) && \
+ trackpoint_is_attr_available(_psmouse, \
+ &psmouse_attr_##_name.dattr.attr)) { \
+ if (!_attr->mask) \
+ trackpoint_write(&_psmouse->ps2dev, \
+ _attr->command, _tp->_name); \
+ else \
+ trackpoint_update_bit(&_psmouse->ps2dev, \
+ _attr->command, _attr->mask, \
+ _tp->_name); \
+ } \
+} while (0)
- if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
- return -1;
+#define TRACKPOINT_SET_POWER_ON_DEFAULT(_tp, _name) \
+do { \
+ _tp->_name = trackpoint_attr_##_name.power_on_default; \
+} while (0)
- /* add new TP ID. */
- if (!(param[0] & TP_MAGIC_IDENT))
- return -1;
+static int trackpoint_start_protocol(struct psmouse *psmouse,
+ u8 *variant_id, u8 *firmware_id)
+{
+ u8 param[2] = { 0 };
+ int error;
- if (firmware_id)
- *firmware_id = param[1];
+ error = ps2_command(&psmouse->ps2dev,
+ param, MAKE_PS2_CMD(0, 2, TP_READ_ID));
+ if (error)
+ return error;
+
+ switch (param[0]) {
+ case TP_VARIANT_IBM:
+ case TP_VARIANT_ALPS:
+ case TP_VARIANT_ELAN:
+ case TP_VARIANT_NXP:
+ if (variant_id)
+ *variant_id = param[0];
+ if (firmware_id)
+ *firmware_id = param[1];
+ return 0;
+ }
- return 0;
+ return -ENODEV;
}
/*
@@ -285,7 +315,7 @@ static int trackpoint_sync(struct psmouse *psmouse, bool in_power_on_state)
{
struct trackpoint_data *tp = psmouse->private;
- if (!in_power_on_state) {
+ if (!in_power_on_state && tp->variant_id == TP_VARIANT_IBM) {
/*
* Disable features that may make device unusable
* with this driver.
@@ -347,7 +377,8 @@ static void trackpoint_defaults(struct trackpoint_data *tp)
static void trackpoint_disconnect(struct psmouse *psmouse)
{
- sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, &trackpoint_attr_group);
+ device_remove_group(&psmouse->ps2dev.serio->dev,
+ &trackpoint_attr_group);
kfree(psmouse->private);
psmouse->private = NULL;
@@ -355,14 +386,20 @@ static void trackpoint_disconnect(struct psmouse *psmouse)
static int trackpoint_reconnect(struct psmouse *psmouse)
{
- int reset_fail;
+ struct trackpoint_data *tp = psmouse->private;
+ int error;
+ bool was_reset;
- if (trackpoint_start_protocol(psmouse, NULL))
- return -1;
+ error = trackpoint_start_protocol(psmouse, NULL, NULL);
+ if (error)
+ return error;
- reset_fail = trackpoint_power_on_reset(&psmouse->ps2dev);
- if (trackpoint_sync(psmouse, !reset_fail))
- return -1;
+ was_reset = tp->variant_id == TP_VARIANT_IBM &&
+ trackpoint_power_on_reset(&psmouse->ps2dev) == 0;
+
+ error = trackpoint_sync(psmouse, was_reset);
+ if (error)
+ return error;
return 0;
}
@@ -370,46 +407,66 @@ static int trackpoint_reconnect(struct psmouse *psmouse)
int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
- unsigned char firmware_id;
- unsigned char button_info;
+ struct trackpoint_data *tp;
+ u8 variant_id;
+ u8 firmware_id;
+ u8 button_info;
int error;
- if (trackpoint_start_protocol(psmouse, &firmware_id))
- return -1;
+ error = trackpoint_start_protocol(psmouse, &variant_id, &firmware_id);
+ if (error)
+ return error;
if (!set_properties)
return 0;
- if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) {
- psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
- button_info = 0x33;
- }
-
- psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
- if (!psmouse->private)
+ tp = kzalloc(sizeof(*tp), GFP_KERNEL);
+ if (!tp)
return -ENOMEM;
- psmouse->vendor = "IBM";
+ trackpoint_defaults(tp);
+ tp->variant_id = variant_id;
+ tp->firmware_id = firmware_id;
+
+ psmouse->private = tp;
+
+ psmouse->vendor = trackpoint_variants[variant_id];
psmouse->name = "TrackPoint";
psmouse->reconnect = trackpoint_reconnect;
psmouse->disconnect = trackpoint_disconnect;
+ if (variant_id != TP_VARIANT_IBM) {
+ /* Newer variants do not support extended button query. */
+ button_info = 0x33;
+ } else {
+ error = trackpoint_read(ps2dev, TP_EXT_BTN, &button_info);
+ if (error) {
+ psmouse_warn(psmouse,
+ "failed to get extended button data, assuming 3 buttons\n");
+ button_info = 0x33;
+ } else if (!button_info) {
+ psmouse_warn(psmouse,
+ "got 0 in extended button data, assuming 3 buttons\n");
+ button_info = 0x33;
+ }
+ }
+
if ((button_info & 0x0f) >= 3)
- __set_bit(BTN_MIDDLE, psmouse->dev->keybit);
+ input_set_capability(psmouse->dev, EV_KEY, BTN_MIDDLE);
__set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit);
__set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit);
- trackpoint_defaults(psmouse->private);
-
- error = trackpoint_power_on_reset(ps2dev);
-
- /* Write defaults to TP only if reset fails. */
- if (error)
+ if (variant_id != TP_VARIANT_IBM ||
+ trackpoint_power_on_reset(ps2dev) != 0) {
+ /*
+ * Write defaults to TP if we did not reset the trackpoint.
+ */
trackpoint_sync(psmouse, false);
+ }
- error = sysfs_create_group(&ps2dev->serio->dev.kobj, &trackpoint_attr_group);
+ error = device_add_group(&ps2dev->serio->dev, &trackpoint_attr_group);
if (error) {
psmouse_err(psmouse,
"failed to create sysfs attributes, error: %d\n",
@@ -420,8 +477,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
}
psmouse_info(psmouse,
- "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
- firmware_id,
+ "%s TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
+ psmouse->vendor, firmware_id,
(button_info & 0xf0) >> 4, button_info & 0x0f);
return 0;
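
The trackpoint hunks above keep a single attribute group and let the sysfs core decide per attribute what to expose, via the group's .is_visible hook, which runs once per attribute at registration time. As a general illustration of that mechanism — every name below (example_priv, full_featured, the two attributes) is hypothetical, not part of this patch — a minimal sketch:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>

struct example_priv {
	bool full_featured;		/* hypothetical variant flag */
};

static ssize_t sensitivity_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 128);	/* placeholder value */
}
static DEVICE_ATTR_RO(sensitivity);

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 97);	/* placeholder value */
}
static DEVICE_ATTR_RO(speed);

static struct attribute *example_attrs[] = {
	&dev_attr_sensitivity.attr,
	&dev_attr_speed.attr,
	NULL
};

static umode_t example_attr_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct example_priv *priv = dev_get_drvdata(dev);

	/* Hide "speed" on reduced-function variants. */
	if (!priv->full_featured && attr == &dev_attr_speed.attr)
		return 0;

	return attr->mode;	/* keep the mode declared in the group */
}

static const struct attribute_group example_group = {
	.is_visible = example_attr_visible,
	.attrs	    = example_attrs,
};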
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 8805575..10a0391 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -21,10 +21,16 @@
#define TP_COMMAND 0xE2 /* Commands start with this */
#define TP_READ_ID 0xE1 /* Sent for device identification */
-#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */
- /* by the firmware ID */
- /* Firmware ID includes 0x1, 0x2, 0x3 */
+/*
+ * Valid first byte responses to the "Read Secondary ID" (0xE1) command.
+ * 0x01 was the original IBM trackpoint; the others implement only a
+ * very limited subset of trackpoint features.
+ */
+#define TP_VARIANT_IBM 0x01
+#define TP_VARIANT_ALPS 0x02
+#define TP_VARIANT_ELAN 0x03
+#define TP_VARIANT_NXP 0x04
/*
* Commands
@@ -136,18 +142,20 @@
#define MAKE_PS2_CMD(params, results, cmd) ((params<<12) | (results<<8) | (cmd))
-struct trackpoint_data
-{
- unsigned char sensitivity, speed, inertia, reach;
- unsigned char draghys, mindrag;
- unsigned char thresh, upthresh;
- unsigned char ztime, jenks;
- unsigned char drift_time;
+struct trackpoint_data {
+ u8 variant_id;
+ u8 firmware_id;
+
+ u8 sensitivity, speed, inertia, reach;
+ u8 draghys, mindrag;
+ u8 thresh, upthresh;
+ u8 ztime, jenks;
+ u8 drift_time;
/* toggles */
- unsigned char press_to_select;
- unsigned char skipback;
- unsigned char ext_dev;
+ bool press_to_select;
+ bool skipback;
+ bool ext_dev;
};
#ifdef CONFIG_MOUSE_PS2_TRACKPOINT
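
trackpoint_detect() above indexes a trackpoint_variants[] name table by the first ID byte; that table is defined in a part of the patch not shown here. A plausible sketch, consistent with the TP_VARIANT_* values introduced above (the exact vendor strings are an assumption):

static const char * const trackpoint_variants[] = {
	[TP_VARIANT_IBM]	= "IBM",
	[TP_VARIANT_ALPS]	= "ALPS",
	[TP_VARIANT_ELAN]	= "Elan",
	[TP_VARIANT_NXP]	= "NXP",
};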
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 4f2bb59..141ea22 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -230,8 +230,10 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
"Failed to process interrupt request: %d\n", ret);
- if (count)
+ if (count) {
kfree(attn_data.data);
+ attn_data.data = NULL;
+ }
if (!kfifo_is_empty(&drvdata->attn_fifo))
return rmi_irq_fn(irq, dev_id);
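
rmi_irq_fn() tail-calls itself while the surrounding state still holds the just-freed pointer, so clearing it right after kfree() turns any repeated free into a harmless no-op. The shape of that defensive pattern, reduced to a self-contained sketch (struct ctx and its again flag are hypothetical):

#include <linux/slab.h>

struct ctx {			/* hypothetical */
	void *data;
	bool again;
};

static void consume(struct ctx *c)
{
	if (c->data) {
		kfree(c->data);
		c->data = NULL;	/* a later kfree(c->data) is now a no-op */
	}
	if (c->again) {		/* stand-in for the kfifo re-check */
		c->again = false;
		consume(c);	/* re-entry sees a clean pointer */
	}
}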
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
index ae966e3..8a07ae1 100644
--- a/drivers/input/rmi4/rmi_f01.c
+++ b/drivers/input/rmi4/rmi_f01.c
@@ -570,14 +570,19 @@ static int rmi_f01_probe(struct rmi_function *fn)
dev_set_drvdata(&fn->dev, f01);
- error = devm_device_add_group(&fn->rmi_dev->dev, &rmi_f01_attr_group);
+ error = sysfs_create_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
if (error)
- dev_warn(&fn->dev,
- "Failed to create attribute group: %d\n", error);
+ dev_warn(&fn->dev, "Failed to create sysfs group: %d\n", error);
return 0;
}
+static void rmi_f01_remove(struct rmi_function *fn)
+{
+ /* Note that the bus device is used, not the F01 device */
+ sysfs_remove_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
+}
+
static int rmi_f01_config(struct rmi_function *fn)
{
struct f01_data *f01 = dev_get_drvdata(&fn->dev);
@@ -717,6 +722,7 @@ struct rmi_function_handler rmi_f01_handler = {
},
.func = 0x01,
.probe = rmi_f01_probe,
+ .remove = rmi_f01_remove,
.config = rmi_f01_config,
.attention = rmi_f01_attention,
.suspend = rmi_f01_suspend,
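
The F01 hunks trade devm_device_add_group() for an explicit create/remove pair because — as the in-diff comment notes — the group is attached to the shared RMI bus device while its backing data belongs to the shorter-lived F01 function, so devres keyed to the bus device would tear the attributes down too late. A generic sketch of the explicit pairing (all demo_* names are hypothetical):

#include <linux/device.h>
#include <linux/sysfs.h>

static struct attribute *demo_attrs[] = {
	NULL			/* attribute pointers would go here */
};

static const struct attribute_group demo_group = {
	.name  = "demo",
	.attrs = demo_attrs,
};

static int demo_probe(struct device *shared_dev)
{
	/* Group is created on a device this driver does not own... */
	return sysfs_create_group(&shared_dev->kobj, &demo_group);
}

static void demo_remove(struct device *shared_dev)
{
	/* ...so teardown must mirror it explicitly. */
	sysfs_remove_group(&shared_dev->kobj, &demo_group);
}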
diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c
index 7ed828a..3486d94 100644
--- a/drivers/input/touchscreen/88pm860x-ts.c
+++ b/drivers/input/touchscreen/88pm860x-ts.c
@@ -126,7 +126,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
int data, n, ret;
if (!np)
return -ENODEV;
- np = of_find_node_by_name(np, "touch");
+ np = of_get_child_by_name(np, "touch");
if (!np) {
dev_err(&pdev->dev, "Can't find touch node\n");
return -EINVAL;
@@ -144,13 +144,13 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
if (data) {
ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data);
if (ret < 0)
- return -EINVAL;
+ goto err_put_node;
}
/* set tsi prebias time */
if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) {
ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data);
if (ret < 0)
- return -EINVAL;
+ goto err_put_node;
}
/* set prebias & prechg time of pen detect */
data = 0;
@@ -161,10 +161,18 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
if (data) {
ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data);
if (ret < 0)
- return -EINVAL;
+ goto err_put_node;
}
of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x);
+
+ of_node_put(np);
+
return 0;
+
+err_put_node:
+ of_node_put(np);
+
+ return -EINVAL;
}
#else
#define pm860x_touch_dt_init(x, y, z) (-1)
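
of_get_child_by_name() differs from of_find_node_by_name() in two ways that matter here: it searches only direct children, and it does not consume the caller's reference on the starting node. The returned child must still be balanced with of_node_put() on every exit path, which is what the new err_put_node label provides. A minimal sketch of the pattern (the "example,prop" property name is made up):

#include <linux/of.h>

static int parse_touch_child(struct device_node *parent)
{
	struct device_node *np;
	int ret = 0;
	u32 val;

	np = of_get_child_by_name(parent, "touch");
	if (!np)
		return -ENODEV;

	if (of_property_read_u32(np, "example,prop", &val))
		ret = -EINVAL;		/* fall through to the put */

	of_node_put(np);		/* balance the child reference */
	return ret;
}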
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index e102d776..a458e5e 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/async.h>
#include <linux/i2c.h>
@@ -1261,10 +1262,13 @@ static int elants_i2c_probe(struct i2c_client *client,
}
/*
- * Systems using device tree should set up interrupt via DTS,
- * the rest will use the default falling edge interrupts.
+ * Platform code (ACPI, DTS) should normally set up interrupt
+ * for us, but in case it did not let's fall back to using falling
+ * edge to be compatible with older Chromebooks.
*/
- irqflags = client->dev.of_node ? 0 : IRQF_TRIGGER_FALLING;
+ irqflags = irq_get_trigger_type(client->irq);
+ if (!irqflags)
+ irqflags = IRQF_TRIGGER_FALLING;
error = devm_request_threaded_irq(&client->dev, client->irq,
NULL, elants_i2c_irq,
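
The elants change replaces the DT-only heuristic with a firmware-agnostic one: ask the IRQ core what trigger type ACPI or DT already programmed, and only force falling edge when nothing was set. The same fallback in isolation, as a sketch:

#include <linux/interrupt.h>
#include <linux/irq.h>

static unsigned long pick_irqflags(int irq)
{
	/* Trigger type programmed by the platform (ACPI/DT), if any. */
	unsigned long irqflags = irq_get_trigger_type(irq);

	/* Older boards leave it unset; keep the historical default. */
	return irqflags ? irqflags : IRQF_TRIGGER_FALLING;
}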
diff --git a/drivers/input/touchscreen/hideep.c b/drivers/input/touchscreen/hideep.c
index fc080a7..f1cd4dd 100644
--- a/drivers/input/touchscreen/hideep.c
+++ b/drivers/input/touchscreen/hideep.c
@@ -10,8 +10,7 @@
#include <linux/of.h>
#include <linux/firmware.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
#include <linux/interrupt.h>
diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c
index 8d7f9c8..9642f10 100644
--- a/drivers/input/touchscreen/of_touchscreen.c
+++ b/drivers/input/touchscreen/of_touchscreen.c
@@ -13,6 +13,7 @@
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
+#include <linux/module.h>
static bool touchscreen_get_prop_u32(struct device *dev,
const char *property,
@@ -185,3 +186,6 @@ void touchscreen_report_pos(struct input_dev *input,
input_report_abs(input, multitouch ? ABS_MT_POSITION_Y : ABS_Y, y);
}
EXPORT_SYMBOL(touchscreen_report_pos);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Device-tree helpers functions for touchscreen devices");
diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c
index 26b1cb8..675efa9 100644
--- a/drivers/input/touchscreen/s6sy761.c
+++ b/drivers/input/touchscreen/s6sy761.c
@@ -1,13 +1,8 @@
-/*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd.
- * Author: Andi Shyti <andi.shyti@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Samsung S6SY761 Touchscreen device driver
- */
+// SPDX-License-Identifier: GPL-2.0
+// Samsung S6SY761 Touchscreen device driver
+//
+// Copyright (c) 2017 Samsung Electronics Co., Ltd.
+// Copyright (c) 2017 Andi Shyti <andi.shyti@samsung.com>
#include <asm/unaligned.h>
#include <linux/delay.h>
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index c12d018..2a123e2 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -1,13 +1,8 @@
-/*
- * Copyright (c) 2017 Samsung Electronics Co., Ltd.
- * Author: Andi Shyti <andi.shyti@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * STMicroelectronics FTS Touchscreen device driver
- */
+// SPDX-License-Identifier: GPL-2.0
+// STMicroelectronics FTS Touchscreen device driver
+//
+// Copyright (c) 2017 Samsung Electronics Co., Ltd.
+// Copyright (c) 2017 Andi Shyti <andi.shyti@samsung.com>
#include <linux/delay.h>
#include <linux/i2c.h>
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 7d5eb00..97baf88 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4184,7 +4184,7 @@ static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
struct irq_cfg *cfg);
static int irq_remapping_activate(struct irq_domain *domain,
- struct irq_data *irq_data, bool early)
+ struct irq_data *irq_data, bool reserve)
{
struct amd_ir_data *data = irq_data->chip_data;
struct irq_2_irte *irte_info = &data->irq_2_irte;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index f122071..744592d 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1698,13 +1698,15 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
domain->geometry.aperture_end = (1UL << ias) - 1;
domain->geometry.force_aperture = true;
- smmu_domain->pgtbl_ops = pgtbl_ops;
ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
- if (ret < 0)
+ if (ret < 0) {
free_io_pgtable_ops(pgtbl_ops);
+ return ret;
+ }
- return ret;
+ smmu_domain->pgtbl_ops = pgtbl_ops;
+ return 0;
}
static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
@@ -1731,7 +1733,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
- int i;
+ int i, j;
struct arm_smmu_master_data *master = fwspec->iommu_priv;
struct arm_smmu_device *smmu = master->smmu;
@@ -1739,6 +1741,13 @@ static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
u32 sid = fwspec->ids[i];
__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
+ /* Bridged PCI devices may end up with duplicated IDs */
+ for (j = 0; j < i; j++)
+ if (fwspec->ids[j] == sid)
+ break;
+ if (j < i)
+ continue;
+
arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
}
}
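
The SMMUv3 hunk guards against writing the same stream-table entry twice when bridged PCI devices alias to a single stream ID. It is the classic quadratic "seen earlier in the array?" scan, cheap for the handful of IDs a master carries; extracted as a sketch:

#include <linux/types.h>

static bool sid_seen_before(const u32 *ids, int i)
{
	int j;

	for (j = 0; j < i; j++)
		if (ids[j] == ids[i])
			return true;	/* duplicate: caller skips it */

	return false;
}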
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 76a193c..66f69af 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -1397,7 +1397,7 @@ static void intel_irq_remapping_free(struct irq_domain *domain,
}
static int intel_irq_remapping_activate(struct irq_domain *domain,
- struct irq_data *irq_data, bool early)
+ struct irq_data *irq_data, bool reserve)
{
intel_ir_reconfigure_irte(irq_data, true);
return 0;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index c70476b..d913aec 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -343,4 +343,12 @@ config MESON_IRQ_GPIO
help
Support Meson SoC Family GPIO Interrupt Multiplexer
+config GOLDFISH_PIC
+ bool "Goldfish programmable interrupt controller"
+ depends on MIPS && (GOLDFISH || COMPILE_TEST)
+ select IRQ_DOMAIN
+ help
+ Say yes here to enable the Goldfish interrupt controller driver used
+ on Goldfish-based virtual platforms.
+
endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index d2df34a..d27e3e3 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -84,3 +84,4 @@ obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
obj-$(CONFIG_ARCH_SYNQUACER) += irq-sni-exiu.o
obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o
+obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 667b9e1..dfe4a46 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -98,13 +98,35 @@ static struct irq_chip bcm2836_arm_irqchip_gpu = {
.irq_unmask = bcm2836_arm_irqchip_unmask_gpu_irq,
};
-static void bcm2836_arm_irqchip_register_irq(int hwirq, struct irq_chip *chip)
-{
- int irq = irq_create_mapping(intc.domain, hwirq);
+static int bcm2836_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct irq_chip *chip;
+
+ switch (hw) {
+ case LOCAL_IRQ_CNTPSIRQ:
+ case LOCAL_IRQ_CNTPNSIRQ:
+ case LOCAL_IRQ_CNTHPIRQ:
+ case LOCAL_IRQ_CNTVIRQ:
+ chip = &bcm2836_arm_irqchip_timer;
+ break;
+ case LOCAL_IRQ_GPU_FAST:
+ chip = &bcm2836_arm_irqchip_gpu;
+ break;
+ case LOCAL_IRQ_PMU_FAST:
+ chip = &bcm2836_arm_irqchip_pmu;
+ break;
+ default:
+ pr_warn_once("Unexpected hw irq: %lu\n", hw);
+ return -EINVAL;
+ }
irq_set_percpu_devid(irq);
- irq_set_chip_and_handler(irq, chip, handle_percpu_devid_irq);
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+ return 0;
}
static void
@@ -165,7 +187,8 @@ static int bcm2836_cpu_dying(unsigned int cpu)
#endif
static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
- .xlate = irq_domain_xlate_onecell
+ .xlate = irq_domain_xlate_onetwocell,
+ .map = bcm2836_map,
};
static void
@@ -218,19 +241,6 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
if (!intc.domain)
panic("%pOF: unable to create IRQ domain\n", node);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPSIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPNSIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTHPIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTVIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_GPU_FAST,
- &bcm2836_arm_irqchip_gpu);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_PMU_FAST,
- &bcm2836_arm_irqchip_pmu);
-
bcm2836_arm_irqchip_smp_init();
set_handle_irq(bcm2836_arm_irqchip_handle_irq);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 4039e64..06f025f 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2303,7 +2303,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
}
static int its_irq_domain_activate(struct irq_domain *domain,
- struct irq_data *d, bool early)
+ struct irq_data *d, bool reserve)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
@@ -2818,7 +2818,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
}
static int its_vpe_irq_domain_activate(struct irq_domain *domain,
- struct irq_data *d, bool early)
+ struct irq_data *d, bool reserve)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
struct its_node *its;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 9a7a150..a57c0fb 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1306,6 +1306,10 @@ gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
void __iomem *redist_base;
+ /* A GICC entry with !ACPI_MADT_ENABLED is not usable, so skip it */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
redist_base = ioremap(gicc->gicr_base_address, size);
if (!redist_base)
return -ENOMEM;
@@ -1355,6 +1359,13 @@ static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
return 0;
+ /*
+ * It is perfectly valid for firmware to pass a disabled GICC entry;
+ * the driver should not treat it as an error and should skip the
+ * entry instead of failing the probe.
+ */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
return -ENODEV;
}
diff --git a/drivers/irqchip/irq-goldfish-pic.c b/drivers/irqchip/irq-goldfish-pic.c
new file mode 100644
index 0000000..2a92f03
--- /dev/null
+++ b/drivers/irqchip/irq-goldfish-pic.c
@@ -0,0 +1,139 @@
+/*
+ * Driver for MIPS Goldfish Programmable Interrupt Controller.
+ *
+ * Author: Miodrag Dinic <miodrag.dinic@mips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define GFPIC_NR_IRQS 32
+
+/* 8..39 Cascaded Goldfish PIC interrupts */
+#define GFPIC_IRQ_BASE 8
+
+#define GFPIC_REG_IRQ_PENDING 0x04
+#define GFPIC_REG_IRQ_DISABLE_ALL 0x08
+#define GFPIC_REG_IRQ_DISABLE 0x0c
+#define GFPIC_REG_IRQ_ENABLE 0x10
+
+struct goldfish_pic_data {
+ void __iomem *base;
+ struct irq_domain *irq_domain;
+};
+
+static void goldfish_pic_cascade(struct irq_desc *desc)
+{
+ struct goldfish_pic_data *gfpic = irq_desc_get_handler_data(desc);
+ struct irq_chip *host_chip = irq_desc_get_chip(desc);
+ u32 pending, hwirq, virq;
+
+ chained_irq_enter(host_chip, desc);
+
+ pending = readl(gfpic->base + GFPIC_REG_IRQ_PENDING);
+ while (pending) {
+ hwirq = __fls(pending);
+ virq = irq_linear_revmap(gfpic->irq_domain, hwirq);
+ generic_handle_irq(virq);
+ pending &= ~(1 << hwirq);
+ }
+
+ chained_irq_exit(host_chip, desc);
+}
+
+static const struct irq_domain_ops goldfish_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int __init goldfish_pic_of_init(struct device_node *of_node,
+ struct device_node *parent)
+{
+ struct goldfish_pic_data *gfpic;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ unsigned int parent_irq;
+ int ret = 0;
+
+ gfpic = kzalloc(sizeof(*gfpic), GFP_KERNEL);
+ if (!gfpic) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ parent_irq = irq_of_parse_and_map(of_node, 0);
+ if (!parent_irq) {
+ pr_err("Failed to map parent IRQ!\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ gfpic->base = of_iomap(of_node, 0);
+ if (!gfpic->base) {
+ pr_err("Failed to map base address!\n");
+ ret = -ENOMEM;
+ goto out_unmap_irq;
+ }
+
+ /* Mask interrupts. */
+ writel(1, gfpic->base + GFPIC_REG_IRQ_DISABLE_ALL);
+
+ gc = irq_alloc_generic_chip("GFPIC", 1, GFPIC_IRQ_BASE, gfpic->base,
+ handle_level_irq);
+ if (!gc) {
+ pr_err("Failed to allocate chip structures!\n");
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ ct = gc->chip_types;
+ ct->regs.enable = GFPIC_REG_IRQ_ENABLE;
+ ct->regs.disable = GFPIC_REG_IRQ_DISABLE;
+ ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+ ct->chip.irq_mask = irq_gc_mask_disable_reg;
+
+ irq_setup_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS), 0,
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+
+ gfpic->irq_domain = irq_domain_add_legacy(of_node, GFPIC_NR_IRQS,
+ GFPIC_IRQ_BASE, 0,
+ &goldfish_irq_domain_ops,
+ NULL);
+ if (!gfpic->irq_domain) {
+ pr_err("Failed to add irqdomain!\n");
+ ret = -ENOMEM;
+ goto out_destroy_generic_chip;
+ }
+
+ irq_set_chained_handler_and_data(parent_irq,
+ goldfish_pic_cascade, gfpic);
+
+ pr_info("Successfully registered.\n");
+ return 0;
+
+out_destroy_generic_chip:
+ irq_destroy_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS),
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+out_iounmap:
+ iounmap(gfpic->base);
+out_unmap_irq:
+ irq_dispose_mapping(parent_irq);
+out_free:
+ kfree(gfpic);
+out_err:
+ pr_err("Failed to initialize! (errno = %d)\n", ret);
+ return ret;
+}
+
+IRQCHIP_DECLARE(google_gf_pic, "google,goldfish-pic", goldfish_pic_of_init);
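
Two details of the new driver are worth unpacking. irq_domain_add_legacy() with first_irq = GFPIC_IRQ_BASE pre-allocates virqs 8..39, so hwirqs 0..31 map onto them one-to-one. And the cascade handler drains the pending register highest-bit-first with __fls(); that loop, isolated into a stand-alone sketch (pr_info() standing in for generic_handle_irq()):

#include <linux/bitops.h>
#include <linux/printk.h>

static void demo_drain(u32 pending)
{
	while (pending) {
		unsigned int hwirq = __fls(pending);	/* highest set bit */

		pr_info("dispatch hwirq %u\n", hwirq);
		pending &= ~BIT(hwirq);
	}
}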
diff --git a/drivers/irqchip/irq-ompic.c b/drivers/irqchip/irq-ompic.c
index cf6d0c4..e66ef43 100644
--- a/drivers/irqchip/irq-ompic.c
+++ b/drivers/irqchip/irq-ompic.c
@@ -171,9 +171,9 @@ static int __init ompic_of_init(struct device_node *node,
/* Setup the device */
ompic_base = ioremap(res.start, resource_size(&res));
- if (IS_ERR(ompic_base)) {
+ if (!ompic_base) {
pr_err("ompic: unable to map registers");
- return PTR_ERR(ompic_base);
+ return -ENOMEM;
}
irq = irq_of_parse_and_map(node, 0);
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 06f29cf..cee59fe 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -342,6 +342,9 @@ static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id)
*/
static struct lock_class_key intc_irqpin_irq_lock_class;
+/* And this is for the request mutex */
+static struct lock_class_key intc_irqpin_irq_request_class;
+
static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
@@ -352,7 +355,8 @@ static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
intc_irqpin_dbg(&p->irq[hw], "map");
irq_set_chip_data(virq, h->host_data);
- irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class);
+ irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class,
+ &intc_irqpin_irq_request_class);
irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
return 0;
}
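
The irqpin change follows a tree-wide API update: irq_set_lockdep_class() now takes two keys, one for the irq_desc spinlock and one for the request mutex, so lockdep can tell nested chained-irqchip request paths apart. The shape of the updated call in a domain map hook, sketched with the kernel's dummy_irq_chip:

#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct lock_class_key demo_lock_class;		/* irq_desc->lock */
static struct lock_class_key demo_request_class;	/* request mutex */

static int demo_irq_map(struct irq_domain *h, unsigned int virq,
			irq_hw_number_t hw)
{
	irq_set_lockdep_class(virq, &demo_lock_class,
			      &demo_request_class);
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);

	return 0;
}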
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index fd83c7f..ede4fa0 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -188,6 +188,7 @@ void led_blink_set(struct led_classdev *led_cdev,
{
del_timer_sync(&led_cdev->blink_timer);
+ clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags);
clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags);
diff --git a/drivers/leds/leds-pm8058.c b/drivers/leds/leds-pm8058.c
index a526743..8988ba3 100644
--- a/drivers/leds/leds-pm8058.c
+++ b/drivers/leds/leds-pm8058.c
@@ -106,7 +106,7 @@ static int pm8058_led_probe(struct platform_device *pdev)
if (!led)
return -ENOMEM;
- led->ledtype = (u32)of_device_get_match_data(&pdev->dev);
+ led->ledtype = (u32)(unsigned long)of_device_get_match_data(&pdev->dev);
map = dev_get_regmap(pdev->dev.parent, NULL);
if (!map) {
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 2a953ef..10c0898 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -27,13 +27,6 @@ config NVM_DEBUG
It is required to create/remove targets without IOCTLs.
-config NVM_RRPC
- tristate "Round-robin Hybrid Open-Channel SSD target"
- ---help---
- Allows an open-channel SSD to be exposed as a block device to the
- host. The target is implemented using a linear mapping table and
- cost-based garbage collection. It is optimized for 4K IO sizes.
-
config NVM_PBLK
tristate "Physical Block Device Open-Channel SSD target"
---help---
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
index 2c3fd9d..97d9d7c 100644
--- a/drivers/lightnvm/Makefile
+++ b/drivers/lightnvm/Makefile
@@ -4,7 +4,6 @@
#
obj-$(CONFIG_NVM) := core.o
-obj-$(CONFIG_NVM_RRPC) += rrpc.o
obj-$(CONFIG_NVM_PBLK) += pblk.o
pblk-y := pblk-init.o pblk-core.o pblk-rb.o \
pblk-write.o pblk-cache.o pblk-read.o \
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 83249b4..dcc9e62 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -45,12 +45,6 @@ struct nvm_dev_map {
int nr_chnls;
};
-struct nvm_area {
- struct list_head list;
- sector_t begin;
- sector_t end; /* end is excluded */
-};
-
static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
struct nvm_target *tgt;
@@ -62,6 +56,30 @@ static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
return NULL;
}
+static bool nvm_target_exists(const char *name)
+{
+ struct nvm_dev *dev;
+ struct nvm_target *tgt;
+ bool ret = false;
+
+ down_write(&nvm_lock);
+ list_for_each_entry(dev, &nvm_devices, devices) {
+ mutex_lock(&dev->mlock);
+ list_for_each_entry(tgt, &dev->targets, list) {
+ if (!strcmp(name, tgt->disk->disk_name)) {
+ ret = true;
+ mutex_unlock(&dev->mlock);
+ goto out;
+ }
+ }
+ mutex_unlock(&dev->mlock);
+ }
+
+out:
+ up_write(&nvm_lock);
+ return ret;
+}
+
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
int i;
@@ -104,7 +122,7 @@ static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
if (clear) {
for (j = 0; j < ch_map->nr_luns; j++) {
int lun = j + lun_offs[j];
- int lunid = (ch * dev->geo.luns_per_chnl) + lun;
+ int lunid = (ch * dev->geo.nr_luns) + lun;
WARN_ON(!test_and_clear_bit(lunid,
dev->lun_map));
@@ -122,7 +140,8 @@ static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
}
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
- int lun_begin, int lun_end)
+ u16 lun_begin, u16 lun_end,
+ u16 op)
{
struct nvm_tgt_dev *tgt_dev = NULL;
struct nvm_dev_map *dev_rmap = dev->rmap;
@@ -130,10 +149,10 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
struct ppa_addr *luns;
int nr_luns = lun_end - lun_begin + 1;
int luns_left = nr_luns;
- int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
- int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
- int bch = lun_begin / dev->geo.luns_per_chnl;
- int blun = lun_begin % dev->geo.luns_per_chnl;
+ int nr_chnls = nr_luns / dev->geo.nr_luns;
+ int nr_chnls_mod = nr_luns % dev->geo.nr_luns;
+ int bch = lun_begin / dev->geo.nr_luns;
+ int blun = lun_begin % dev->geo.nr_luns;
int lunid = 0;
int lun_balanced = 1;
int prev_nr_luns;
@@ -154,15 +173,15 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
if (!luns)
goto err_luns;
- prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
- dev->geo.luns_per_chnl : luns_left;
+ prev_nr_luns = (luns_left > dev->geo.nr_luns) ?
+ dev->geo.nr_luns : luns_left;
for (i = 0; i < nr_chnls; i++) {
struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
int *lun_roffs = ch_rmap->lun_offs;
struct nvm_ch_map *ch_map = &dev_map->chnls[i];
int *lun_offs;
- int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
- dev->geo.luns_per_chnl : luns_left;
+ int luns_in_chnl = (luns_left > dev->geo.nr_luns) ?
+ dev->geo.nr_luns : luns_left;
if (lun_balanced && prev_nr_luns != luns_in_chnl)
lun_balanced = 0;
@@ -199,8 +218,9 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
/* Target device only owns a portion of the physical device */
tgt_dev->geo.nr_chnls = nr_chnls;
- tgt_dev->geo.nr_luns = nr_luns;
- tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
+ tgt_dev->geo.all_luns = nr_luns;
+ tgt_dev->geo.nr_luns = (lun_balanced) ? prev_nr_luns : -1;
+ tgt_dev->geo.op = op;
tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
tgt_dev->q = dev->q;
tgt_dev->map = dev_map;
@@ -226,27 +246,79 @@ static const struct block_device_operations nvm_fops = {
.owner = THIS_MODULE,
};
-static struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
+static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
- struct nvm_tgt_type *tmp, *tt = NULL;
+ struct nvm_tgt_type *tt;
- if (lock)
- down_write(&nvm_tgtt_lock);
+ list_for_each_entry(tt, &nvm_tgt_types, list)
+ if (!strcmp(name, tt->name))
+ return tt;
- list_for_each_entry(tmp, &nvm_tgt_types, list)
- if (!strcmp(name, tmp->name)) {
- tt = tmp;
- break;
- }
+ return NULL;
+}
+
+static struct nvm_tgt_type *nvm_find_target_type(const char *name)
+{
+ struct nvm_tgt_type *tt;
+
+ down_write(&nvm_tgtt_lock);
+ tt = __nvm_find_target_type(name);
+ up_write(&nvm_tgtt_lock);
- if (lock)
- up_write(&nvm_tgtt_lock);
return tt;
}
+static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
+ int lun_end)
+{
+ if (lun_begin > lun_end || lun_end >= geo->all_luns) {
+ pr_err("nvm: lun out of bound (%u:%u > %u)\n",
+ lun_begin, lun_end, geo->all_luns - 1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __nvm_config_simple(struct nvm_dev *dev,
+ struct nvm_ioctl_create_simple *s)
+{
+ struct nvm_geo *geo = &dev->geo;
+
+ if (s->lun_begin == -1 && s->lun_end == -1) {
+ s->lun_begin = 0;
+ s->lun_end = geo->all_luns - 1;
+ }
+
+ return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
+}
+
+static int __nvm_config_extended(struct nvm_dev *dev,
+ struct nvm_ioctl_create_extended *e)
+{
+ struct nvm_geo *geo = &dev->geo;
+
+ if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
+ e->lun_begin = 0;
+ e->lun_end = dev->geo.all_luns - 1;
+ }
+
+ /* An unset op falls back to the target's default */
+ if (e->op == 0xFFFF)
+ e->op = NVM_TARGET_DEFAULT_OP;
+
+ if (e->op < NVM_TARGET_MIN_OP ||
+ e->op > NVM_TARGET_MAX_OP) {
+ pr_err("nvm: invalid over provisioning value\n");
+ return -EINVAL;
+ }
+
+ return nvm_config_check_luns(geo, e->lun_begin, e->lun_end);
+}
+
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
- struct nvm_ioctl_create_simple *s = &create->conf.s;
+ struct nvm_ioctl_create_extended e;
struct request_queue *tqueue;
struct gendisk *tdisk;
struct nvm_tgt_type *tt;
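
Both __nvm_config_simple() and __nvm_config_extended() above normalize all-ones sentinels ("field left unset by userspace") before validating. The over-provisioning half of that flow, condensed into a sketch reusing the same NVM_TARGET_* macros the diff relies on:

#include <linux/types.h>
#include <linux/lightnvm.h>

static int normalize_op(u16 *op)
{
	if (*op == 0xFFFF)			/* sentinel: not set */
		*op = NVM_TARGET_DEFAULT_OP;	/* target's default */

	if (*op < NVM_TARGET_MIN_OP || *op > NVM_TARGET_MAX_OP)
		return -EINVAL;			/* reject bogus OP values */

	return 0;
}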
@@ -255,22 +327,41 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
void *targetdata;
int ret;
- tt = nvm_find_target_type(create->tgttype, 1);
+ switch (create->conf.type) {
+ case NVM_CONFIG_TYPE_SIMPLE:
+ ret = __nvm_config_simple(dev, &create->conf.s);
+ if (ret)
+ return ret;
+
+ e.lun_begin = create->conf.s.lun_begin;
+ e.lun_end = create->conf.s.lun_end;
+ e.op = NVM_TARGET_DEFAULT_OP;
+ break;
+ case NVM_CONFIG_TYPE_EXTENDED:
+ ret = __nvm_config_extended(dev, &create->conf.e);
+ if (ret)
+ return ret;
+
+ e = create->conf.e;
+ break;
+ default:
+ pr_err("nvm: config type not valid\n");
+ return -EINVAL;
+ }
+
+ tt = nvm_find_target_type(create->tgttype);
if (!tt) {
pr_err("nvm: target type %s not found\n", create->tgttype);
return -EINVAL;
}
- mutex_lock(&dev->mlock);
- t = nvm_find_target(dev, create->tgtname);
- if (t) {
- pr_err("nvm: target name already exists.\n");
- mutex_unlock(&dev->mlock);
+ if (nvm_target_exists(create->tgtname)) {
+ pr_err("nvm: target name already exists (%s)\n",
+ create->tgtname);
return -EINVAL;
}
- mutex_unlock(&dev->mlock);
- ret = nvm_reserve_luns(dev, s->lun_begin, s->lun_end);
+ ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
if (ret)
return ret;
@@ -280,7 +371,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
goto err_reserve;
}
- tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
+ tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
if (!tgt_dev) {
pr_err("nvm: could not create target device\n");
ret = -ENOMEM;
@@ -350,7 +441,7 @@ err_dev:
err_t:
kfree(t);
err_reserve:
- nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
+ nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
return ret;
}
@@ -420,7 +511,7 @@ static int nvm_register_map(struct nvm_dev *dev)
for (i = 0; i < dev->geo.nr_chnls; i++) {
struct nvm_ch_map *ch_rmap;
int *lun_roffs;
- int luns_in_chnl = dev->geo.luns_per_chnl;
+ int luns_in_chnl = dev->geo.nr_luns;
ch_rmap = &rmap->chnls[i];
@@ -524,41 +615,12 @@ static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}
-void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
- int len)
-{
- struct nvm_geo *geo = &dev->geo;
- struct nvm_dev_map *dev_rmap = dev->rmap;
- u64 i;
-
- for (i = 0; i < len; i++) {
- struct nvm_ch_map *ch_rmap;
- int *lun_roffs;
- struct ppa_addr gaddr;
- u64 pba = le64_to_cpu(entries[i]);
- u64 diff;
-
- if (!pba)
- continue;
-
- gaddr = linear_to_generic_addr(geo, pba);
- ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
- lun_roffs = ch_rmap->lun_offs;
-
- diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
- (lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;
-
- entries[i] -= cpu_to_le64(diff);
- }
-}
-EXPORT_SYMBOL(nvm_part_to_tgt);
-
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
int ret = 0;
down_write(&nvm_tgtt_lock);
- if (nvm_find_target_type(tt->name, 0))
+ if (__nvm_find_target_type(tt->name))
ret = -EEXIST;
else
list_add(&tt->list, &nvm_tgt_types);
@@ -726,112 +788,6 @@ int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
}
EXPORT_SYMBOL(nvm_submit_io_sync);
-int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
- int nr_ppas)
-{
- struct nvm_geo *geo = &tgt_dev->geo;
- struct nvm_rq rqd;
- int ret;
-
- memset(&rqd, 0, sizeof(struct nvm_rq));
-
- rqd.opcode = NVM_OP_ERASE;
- rqd.flags = geo->plane_mode >> 1;
-
- ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
- if (ret)
- return ret;
-
- ret = nvm_submit_io_sync(tgt_dev, &rqd);
- if (ret) {
- pr_err("rrpr: erase I/O submission failed: %d\n", ret);
- goto free_ppa_list;
- }
-
-free_ppa_list:
- nvm_free_rqd_ppalist(tgt_dev, &rqd);
-
- return ret;
-}
-EXPORT_SYMBOL(nvm_erase_sync);
-
-int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
- nvm_l2p_update_fn *update_l2p, void *priv)
-{
- struct nvm_dev *dev = tgt_dev->parent;
-
- if (!dev->ops->get_l2p_tbl)
- return 0;
-
- return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
-}
-EXPORT_SYMBOL(nvm_get_l2p_tbl);
-
-int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
-{
- struct nvm_dev *dev = tgt_dev->parent;
- struct nvm_geo *geo = &dev->geo;
- struct nvm_area *area, *prev, *next;
- sector_t begin = 0;
- sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;
-
- if (len > max_sectors)
- return -EINVAL;
-
- area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
- if (!area)
- return -ENOMEM;
-
- prev = NULL;
-
- spin_lock(&dev->lock);
- list_for_each_entry(next, &dev->area_list, list) {
- if (begin + len > next->begin) {
- begin = next->end;
- prev = next;
- continue;
- }
- break;
- }
-
- if ((begin + len) > max_sectors) {
- spin_unlock(&dev->lock);
- kfree(area);
- return -EINVAL;
- }
-
- area->begin = *lba = begin;
- area->end = begin + len;
-
- if (prev) /* insert into sorted order */
- list_add(&area->list, &prev->list);
- else
- list_add(&area->list, &dev->area_list);
- spin_unlock(&dev->lock);
-
- return 0;
-}
-EXPORT_SYMBOL(nvm_get_area);
-
-void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
-{
- struct nvm_dev *dev = tgt_dev->parent;
- struct nvm_area *area;
-
- spin_lock(&dev->lock);
- list_for_each_entry(area, &dev->area_list, list) {
- if (area->begin != begin)
- continue;
-
- list_del(&area->list);
- spin_unlock(&dev->lock);
- kfree(area);
- return;
- }
- spin_unlock(&dev->lock);
-}
-EXPORT_SYMBOL(nvm_put_area);
-
void nvm_end_io(struct nvm_rq *rqd)
{
struct nvm_tgt_dev *tgt_dev = rqd->dev;
@@ -858,10 +814,10 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
struct nvm_geo *geo = &dev->geo;
int blk, offset, pl, blktype;
- if (nr_blks != geo->blks_per_lun * geo->plane_mode)
+ if (nr_blks != geo->nr_chks * geo->plane_mode)
return -EINVAL;
- for (blk = 0; blk < geo->blks_per_lun; blk++) {
+ for (blk = 0; blk < geo->nr_chks; blk++) {
offset = blk * geo->plane_mode;
blktype = blks[offset];
@@ -877,7 +833,7 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
blks[blk] = blktype;
}
- return geo->blks_per_lun;
+ return geo->nr_chks;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);
@@ -892,53 +848,6 @@ int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
-static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
-{
- struct nvm_geo *geo = &dev->geo;
- int i;
-
- dev->lps_per_blk = geo->pgs_per_blk;
- dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
- if (!dev->lptbl)
- return -ENOMEM;
-
- /* Just a linear array */
- for (i = 0; i < dev->lps_per_blk; i++)
- dev->lptbl[i] = i;
-
- return 0;
-}
-
-static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
-{
- int i, p;
- struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;
-
- if (!mlc->num_pairs)
- return 0;
-
- dev->lps_per_blk = mlc->num_pairs;
- dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
- if (!dev->lptbl)
- return -ENOMEM;
-
- /* The lower page table encoding consists of a list of bytes, where each
- * has a lower and an upper half. The first half byte maintains the
- * increment value and every value after is an offset added to the
- * previous incrementation value
- */
- dev->lptbl[0] = mlc->pairs[0] & 0xF;
- for (i = 1; i < dev->lps_per_blk; i++) {
- p = mlc->pairs[i >> 1];
- if (i & 0x1) /* upper */
- dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
- else /* lower */
- dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
- }
-
- return 0;
-}
-
static int nvm_core_init(struct nvm_dev *dev)
{
struct nvm_id *id = &dev->identity;
@@ -946,66 +855,44 @@ static int nvm_core_init(struct nvm_dev *dev)
struct nvm_geo *geo = &dev->geo;
int ret;
+ memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
+
+ if (grp->mtype != 0) {
+ pr_err("nvm: memory type not supported\n");
+ return -EINVAL;
+ }
+
/* Whole device values */
geo->nr_chnls = grp->num_ch;
- geo->luns_per_chnl = grp->num_lun;
-
- /* Generic device values */
- geo->pgs_per_blk = grp->num_pg;
- geo->blks_per_lun = grp->num_blk;
- geo->nr_planes = grp->num_pln;
- geo->fpg_size = grp->fpg_sz;
- geo->pfpg_size = grp->fpg_sz * grp->num_pln;
+ geo->nr_luns = grp->num_lun;
+
+ /* Generic device geometry values */
+ geo->ws_min = grp->ws_min;
+ geo->ws_opt = grp->ws_opt;
+ geo->ws_seq = grp->ws_seq;
+ geo->ws_per_chk = grp->ws_per_chk;
+ geo->nr_chks = grp->num_chk;
geo->sec_size = grp->csecs;
geo->oob_size = grp->sos;
- geo->sec_per_pg = grp->fpg_sz / grp->csecs;
geo->mccap = grp->mccap;
- memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
-
- geo->plane_mode = NVM_PLANE_SINGLE;
geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;
- if (grp->mpos & 0x020202)
- geo->plane_mode = NVM_PLANE_DOUBLE;
- if (grp->mpos & 0x040404)
- geo->plane_mode = NVM_PLANE_QUAD;
+ geo->sec_per_chk = grp->clba;
+ geo->sec_per_lun = geo->sec_per_chk * geo->nr_chks;
+ geo->all_luns = geo->nr_luns * geo->nr_chnls;
- if (grp->mtype != 0) {
- pr_err("nvm: memory type not supported\n");
- return -EINVAL;
- }
-
- /* calculated values */
+ /* 1.2 spec device geometry values */
+ geo->plane_mode = 1 << geo->ws_seq;
+ geo->nr_planes = geo->ws_opt / geo->ws_min;
+ geo->sec_per_pg = geo->ws_min;
geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
- geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
- geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
- geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;
- dev->total_secs = geo->nr_luns * geo->sec_per_lun;
- dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
+ dev->total_secs = geo->all_luns * geo->sec_per_lun;
+ dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
sizeof(unsigned long), GFP_KERNEL);
if (!dev->lun_map)
return -ENOMEM;
- switch (grp->fmtype) {
- case NVM_ID_FMTYPE_SLC:
- if (nvm_init_slc_tbl(dev, grp)) {
- ret = -ENOMEM;
- goto err_fmtype;
- }
- break;
- case NVM_ID_FMTYPE_MLC:
- if (nvm_init_mlc_tbl(dev, grp)) {
- ret = -ENOMEM;
- goto err_fmtype;
- }
- break;
- default:
- pr_err("nvm: flash type not supported\n");
- ret = -EINVAL;
- goto err_fmtype;
- }
-
INIT_LIST_HEAD(&dev->area_list);
INIT_LIST_HEAD(&dev->targets);
mutex_init(&dev->mlock);
@@ -1031,7 +918,6 @@ static void nvm_free(struct nvm_dev *dev)
dev->ops->destroy_dma_pool(dev->dma_pool);
nvm_unregister_map(dev);
- kfree(dev->lptbl);
kfree(dev->lun_map);
kfree(dev);
}
@@ -1062,8 +948,8 @@ static int nvm_init(struct nvm_dev *dev)
pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
dev->name, geo->sec_per_pg, geo->nr_planes,
- geo->pgs_per_blk, geo->blks_per_lun,
- geo->nr_luns, geo->nr_chnls);
+ geo->ws_per_chk, geo->nr_chks,
+ geo->all_luns, geo->nr_chnls);
return 0;
err:
pr_err("nvm: failed to initialize nvm\n");
@@ -1135,7 +1021,6 @@ EXPORT_SYMBOL(nvm_unregister);
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
struct nvm_dev *dev;
- struct nvm_ioctl_create_simple *s;
down_write(&nvm_lock);
dev = nvm_find_nvm_dev(create->dev);
@@ -1146,23 +1031,6 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
return -EINVAL;
}
- if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
- pr_err("nvm: config type not valid\n");
- return -EINVAL;
- }
- s = &create->conf.s;
-
- if (s->lun_begin == -1 && s->lun_end == -1) {
- s->lun_begin = 0;
- s->lun_end = dev->geo.nr_luns - 1;
- }
-
- if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) {
- pr_err("nvm: lun out of bound (%u:%u > %u)\n",
- s->lun_begin, s->lun_end, dev->geo.nr_luns - 1);
- return -EINVAL;
- }
-
return nvm_create_tgt(dev, create);
}
@@ -1262,6 +1130,12 @@ static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
return -EFAULT;
+ if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
+ create.conf.e.rsv != 0) {
+ pr_err("nvm: reserved config field in use\n");
+ return -EINVAL;
+ }
+
create.dev[DISK_NAME_LEN - 1] = '\0';
create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
create.tgtname[DISK_NAME_LEN - 1] = '\0';
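
The ioctl path also shows the standard hygiene for fixed-size strings copied from userspace: reject reserved fields, then force NUL termination before the names are used. Reduced to a sketch (struct demo_ioctl is hypothetical):

#include <linux/uaccess.h>

struct demo_ioctl {
	char name[32];		/* hypothetical fixed-size field */
};

static long demo_copy_arg(struct demo_ioctl *dst, const void __user *arg)
{
	if (copy_from_user(dst, arg, sizeof(*dst)))
		return -EFAULT;

	/* Userspace is untrusted: guarantee termination ourselves. */
	dst->name[sizeof(dst->name) - 1] = '\0';

	return 0;
}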
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
index 0d227ef..000fcad 100644
--- a/drivers/lightnvm/pblk-cache.c
+++ b/drivers/lightnvm/pblk-cache.c
@@ -19,12 +19,16 @@
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
{
+ struct request_queue *q = pblk->dev->q;
struct pblk_w_ctx w_ctx;
sector_t lba = pblk_get_lba(bio);
+ unsigned long start_time = jiffies;
unsigned int bpos, pos;
int nr_entries = pblk_get_secs(bio);
int i, ret;
+ generic_start_io_acct(q, WRITE, bio_sectors(bio), &pblk->disk->part0);
+
/* Update the write buffer head (mem) with the entries that we can
* write. The write in itself cannot fail, so there is no need to
* rollback from here on.
@@ -67,6 +71,7 @@ retry:
pblk_rl_inserted(&pblk->rl, nr_entries);
out:
+ generic_end_io_acct(q, WRITE, &pblk->disk->part0, start_time);
pblk_write_should_kick(pblk);
return ret;
}
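
pblk's buffered writes complete before any media I/O happens, so without the bracket above they would be invisible in iostat. generic_start_io_acct()/generic_end_io_acct() charge the request to the disk's part0 across the critical section; the bare pattern, matching the 4.16-era signatures used in the hunk:

#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/jiffies.h>

static void demo_accounted_write(struct request_queue *q,
				 struct gendisk *disk, struct bio *bio)
{
	unsigned long start_time = jiffies;

	generic_start_io_acct(q, WRITE, bio_sectors(bio), &disk->part0);

	/* ... insert the bio's payload into the write buffer here ... */

	generic_end_io_acct(q, WRITE, &disk->part0, start_time);
}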
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 76516ee..0487b93 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -32,8 +32,8 @@ static void pblk_line_mark_bb(struct work_struct *work)
struct pblk_line *line;
int pos;
- line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
- pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);
+ line = &pblk->lines[pblk_ppa_to_line(*ppa)];
+ pos = pblk_ppa_to_pos(&dev->geo, *ppa);
pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
line->id, pos);
@@ -48,7 +48,7 @@ static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- int pos = pblk_dev_ppa_to_pos(geo, *ppa);
+ int pos = pblk_ppa_to_pos(geo, *ppa);
pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
atomic_long_inc(&pblk->erase_failed);
@@ -66,7 +66,7 @@ static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
struct pblk_line *line;
- line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
+ line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
atomic_dec(&line->left_seblks);
if (rqd->error) {
@@ -144,7 +144,7 @@ void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
BUG_ON(pblk_ppa_empty(ppa));
#endif
- line_id = pblk_tgt_ppa_to_line(ppa);
+ line_id = pblk_ppa_to_line(ppa);
line = &pblk->lines[line_id];
paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
@@ -650,7 +650,7 @@ next_rq:
} else {
for (i = 0; i < rqd.nr_ppas; ) {
struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
- int pos = pblk_dev_ppa_to_pos(geo, ppa);
+ int pos = pblk_ppa_to_pos(geo, ppa);
int read_type = PBLK_READ_RANDOM;
if (pblk_io_aligned(pblk, rq_ppas))
@@ -668,7 +668,7 @@ next_rq:
}
ppa = addr_to_gen_ppa(pblk, paddr, id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
@@ -742,7 +742,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
cmd_op = NVM_OP_PWRITE;
flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
lba_list = emeta_to_lbas(pblk, line->emeta->buf);
- } else if (dir == PBLK_READ) {
+ } else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
bio_op = REQ_OP_READ;
cmd_op = NVM_OP_PREAD;
flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
@@ -802,7 +802,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
if (rqd.error) {
if (dir == PBLK_WRITE)
pblk_log_write_err(pblk, &rqd);
- else
+ else if (dir == PBLK_READ)
pblk_log_read_err(pblk, &rqd);
}
@@ -816,7 +816,7 @@ int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
u64 bpaddr = pblk_line_smeta_start(pblk, line);
- return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ);
+ return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
}
int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
@@ -854,8 +854,8 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
struct nvm_geo *geo = &dev->geo;
pr_err("pblk: could not sync erase line:%d,blk:%d\n",
- pblk_dev_ppa_to_line(ppa),
- pblk_dev_ppa_to_pos(geo, ppa));
+ pblk_ppa_to_line(ppa),
+ pblk_ppa_to_pos(geo, ppa));
rqd.error = ret;
goto out;
@@ -979,7 +979,7 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
/* Start metadata */
smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
- smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);
+ smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);
/* Fill metadata among lines */
if (cur) {
@@ -1032,7 +1032,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
lm->sec_per_line);
bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
lm->sec_per_line);
- line->sec_in_line -= geo->sec_per_blk;
+ line->sec_in_line -= geo->sec_per_chk;
if (bit >= lm->emeta_bb)
nr_bb++;
}
@@ -1145,7 +1145,7 @@ int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
}
spin_unlock(&l_mg->free_lock);
- pblk_rl_free_lines_dec(&pblk->rl, line);
+ pblk_rl_free_lines_dec(&pblk->rl, line, true);
if (!pblk_line_init_bb(pblk, line, 0)) {
list_add(&line->list, &l_mg->free_list);
@@ -1233,7 +1233,7 @@ retry:
l_mg->data_line = retry_line;
spin_unlock(&l_mg->free_lock);
- pblk_rl_free_lines_dec(&pblk->rl, retry_line);
+ pblk_rl_free_lines_dec(&pblk->rl, line, false);
if (pblk_line_erase(pblk, retry_line))
goto retry;
@@ -1252,7 +1252,6 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line *line;
- int is_next = 0;
spin_lock(&l_mg->free_lock);
line = pblk_line_get(pblk);
@@ -1280,7 +1279,6 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
} else {
l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
l_mg->data_next->type = PBLK_LINETYPE_DATA;
- is_next = 1;
}
spin_unlock(&l_mg->free_lock);
@@ -1290,10 +1288,6 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
return NULL;
}
- pblk_rl_free_lines_dec(&pblk->rl, line);
- if (is_next)
- pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
-
retry_setup:
if (!pblk_line_init_metadata(pblk, line, NULL)) {
line = pblk_line_retry(pblk, line);
@@ -1311,6 +1305,8 @@ retry_setup:
goto retry_setup;
}
+ pblk_rl_free_lines_dec(&pblk->rl, line, true);
+
return line;
}
@@ -1395,7 +1391,6 @@ struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line *cur, *new = NULL;
unsigned int left_seblks;
- int is_next = 0;
cur = l_mg->data_line;
new = l_mg->data_next;
@@ -1444,6 +1439,8 @@ retry_setup:
goto retry_setup;
}
+ pblk_rl_free_lines_dec(&pblk->rl, new, true);
+
/* Allocate next line for preparation */
spin_lock(&l_mg->free_lock);
l_mg->data_next = pblk_line_get(pblk);
@@ -1457,13 +1454,9 @@ retry_setup:
} else {
l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
l_mg->data_next->type = PBLK_LINETYPE_DATA;
- is_next = 1;
}
spin_unlock(&l_mg->free_lock);
- if (is_next)
- pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
-
out:
return new;
}
@@ -1561,8 +1554,8 @@ int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
struct nvm_geo *geo = &dev->geo;
pr_err("pblk: could not async erase line:%d,blk:%d\n",
- pblk_dev_ppa_to_line(ppa),
- pblk_dev_ppa_to_pos(geo, ppa));
+ pblk_ppa_to_line(ppa),
+ pblk_ppa_to_pos(geo, ppa));
}
return err;
@@ -1746,7 +1739,7 @@ void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
struct pblk_lun *rlun;
- int nr_luns = geo->nr_luns;
+ int nr_luns = geo->all_luns;
int bit = -1;
while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
@@ -1884,7 +1877,7 @@ void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
/* If the L2P entry maps to a line, the reference is valid */
if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
- int line_id = pblk_dev_ppa_to_line(ppa);
+ int line_id = pblk_ppa_to_line(ppa);
struct pblk_line *line = &pblk->lines[line_id];
kref_get(&line->ref);
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 9c8e114..3d89938 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -169,7 +169,14 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
* the line untouched. TODO: Implement a recovery routine that scans and
* moves all sectors on the line.
*/
- lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
+
+ ret = pblk_recov_check_emeta(pblk, emeta_buf);
+ if (ret) {
+ pr_err("pblk: inconsistent emeta (line %d)\n", line->id);
+ goto fail_free_emeta;
+ }
+
+ lba_list = emeta_to_lbas(pblk, emeta_buf);
if (!lba_list) {
pr_err("pblk: could not interpret emeta (line %d)\n", line->id);
goto fail_free_emeta;
@@ -519,22 +526,12 @@ void pblk_gc_should_start(struct pblk *pblk)
}
}
-/*
- * If flush_wq == 1 then no lock should be held by the caller since
- * flush_workqueue can sleep
- */
-static void pblk_gc_stop(struct pblk *pblk, int flush_wq)
-{
- pblk->gc.gc_active = 0;
- pr_debug("pblk: gc stop\n");
-}
-
void pblk_gc_should_stop(struct pblk *pblk)
{
struct pblk_gc *gc = &pblk->gc;
if (gc->gc_active && !gc->gc_forced)
- pblk_gc_stop(pblk, 0);
+ gc->gc_active = 0;
}
void pblk_gc_should_kick(struct pblk *pblk)
@@ -660,7 +657,7 @@ void pblk_gc_exit(struct pblk *pblk)
gc->gc_enabled = 0;
del_timer_sync(&gc->gc_timer);
- pblk_gc_stop(pblk, 1);
+ gc->gc_active = 0;
if (gc->gc_ts)
kthread_stop(gc->gc_ts);
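
The GC path above no longer trusts emeta blindly: it runs a consistency check before interpreting the buffer as an LBA list. A toy standalone sketch of the same check-then-interpret split (the checksum below is only a stand-in for pblk_calc_emeta_crc(), and the magic value is illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EMETA_MAGIC 0x70626c6bU	/* illustrative, not the on-media PBLK_MAGIC */

struct emeta {
	uint32_t crc;
	uint32_t magic;
	uint64_t lbas[4];
};

/* Toy checksum standing in for pblk_calc_emeta_crc(). */
static uint32_t calc_crc(const struct emeta *e)
{
	const uint8_t *p = (const uint8_t *)&e->magic;
	uint32_t crc = 0;
	size_t i;

	for (i = 0; i < sizeof(*e) - sizeof(e->crc); i++)
		crc = (crc << 1) ^ p[i];
	return crc;
}

/* Mirrors the new split: verify first, only then treat the buffer
 * as an LBA list. Returns 0 when the emeta is consistent. */
static int check_emeta(const struct emeta *e)
{
	if (e->crc != calc_crc(e))
		return 1;
	if (e->magic != EMETA_MAGIC)
		return 1;
	return 0;
}

int main(void)
{
	struct emeta e = { .magic = EMETA_MAGIC };

	e.crc = calc_crc(&e);
	printf("emeta %s\n", check_emeta(&e) ? "inconsistent" : "ok");
	return 0;
}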
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 695826a..93d671c 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -169,8 +169,8 @@ static int pblk_set_ppaf(struct pblk *pblk)
}
ppaf.ch_len = power_len;
- power_len = get_count_order(geo->luns_per_chnl);
- if (1 << power_len != geo->luns_per_chnl) {
+ power_len = get_count_order(geo->nr_luns);
+ if (1 << power_len != geo->nr_luns) {
pr_err("pblk: supports only power-of-two LUN config.\n");
return -EINVAL;
}
@@ -254,7 +254,7 @@ static int pblk_core_init(struct pblk *pblk)
struct nvm_geo *geo = &dev->geo;
pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
- geo->nr_planes * geo->nr_luns;
+ geo->nr_planes * geo->all_luns;
if (pblk_init_global_caches(pblk))
return -ENOMEM;
@@ -270,21 +270,22 @@ static int pblk_core_init(struct pblk *pblk)
if (!pblk->gen_ws_pool)
goto free_page_bio_pool;
- pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
+ pblk->rec_pool = mempool_create_slab_pool(geo->all_luns,
+ pblk_rec_cache);
if (!pblk->rec_pool)
goto free_gen_ws_pool;
- pblk->r_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+ pblk->r_rq_pool = mempool_create_slab_pool(geo->all_luns,
pblk_g_rq_cache);
if (!pblk->r_rq_pool)
goto free_rec_pool;
- pblk->e_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+ pblk->e_rq_pool = mempool_create_slab_pool(geo->all_luns,
pblk_g_rq_cache);
if (!pblk->e_rq_pool)
goto free_r_rq_pool;
- pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+ pblk->w_rq_pool = mempool_create_slab_pool(geo->all_luns,
pblk_w_rq_cache);
if (!pblk->w_rq_pool)
goto free_e_rq_pool;
@@ -354,6 +355,8 @@ static void pblk_core_free(struct pblk *pblk)
mempool_destroy(pblk->e_rq_pool);
mempool_destroy(pblk->w_rq_pool);
+ pblk_rwb_free(pblk);
+
pblk_free_global_caches(pblk);
}
@@ -409,7 +412,7 @@ static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
u8 *blks;
int nr_blks, ret;
- nr_blks = geo->blks_per_lun * geo->plane_mode;
+ nr_blks = geo->nr_chks * geo->plane_mode;
blks = kmalloc(nr_blks, GFP_KERNEL);
if (!blks)
return -ENOMEM;
@@ -482,20 +485,21 @@ static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
int i, ret;
/* TODO: Implement unbalanced LUN support */
- if (geo->luns_per_chnl < 0) {
+ if (geo->nr_luns < 0) {
pr_err("pblk: unbalanced LUN config.\n");
return -EINVAL;
}
- pblk->luns = kcalloc(geo->nr_luns, sizeof(struct pblk_lun), GFP_KERNEL);
+ pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun),
+ GFP_KERNEL);
if (!pblk->luns)
return -ENOMEM;
- for (i = 0; i < geo->nr_luns; i++) {
+ for (i = 0; i < geo->all_luns; i++) {
/* Stripe across channels */
int ch = i % geo->nr_chnls;
int lun_raw = i / geo->nr_chnls;
- int lunid = lun_raw + ch * geo->luns_per_chnl;
+ int lunid = lun_raw + ch * geo->nr_luns;
rlun = &pblk->luns[i];
rlun->bppa = luns[lunid];
@@ -577,22 +581,37 @@ static unsigned int calc_emeta_len(struct pblk *pblk)
static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
struct nvm_tgt_dev *dev = pblk->dev;
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+ struct pblk_line_meta *lm = &pblk->lm;
struct nvm_geo *geo = &dev->geo;
sector_t provisioned;
+ int sec_meta, blk_meta;
- pblk->over_pct = 20;
+ if (geo->op == NVM_TARGET_DEFAULT_OP)
+ pblk->op = PBLK_DEFAULT_OP;
+ else
+ pblk->op = geo->op;
provisioned = nr_free_blks;
- provisioned *= (100 - pblk->over_pct);
+ provisioned *= (100 - pblk->op);
sector_div(provisioned, 100);
+ pblk->op_blks = nr_free_blks - provisioned;
+
/* Internally pblk manages all free blocks, but all calculations based
* on user capacity consider only provisioned blocks
*/
pblk->rl.total_blocks = nr_free_blks;
- pblk->rl.nr_secs = nr_free_blks * geo->sec_per_blk;
- pblk->capacity = provisioned * geo->sec_per_blk;
+ pblk->rl.nr_secs = nr_free_blks * geo->sec_per_chk;
+
+ /* Consider sectors used for metadata */
+ sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
+ blk_meta = DIV_ROUND_UP(sec_meta, geo->sec_per_chk);
+
+ pblk->capacity = (provisioned - blk_meta) * geo->sec_per_chk;
+
atomic_set(&pblk->rl.free_blocks, nr_free_blks);
+ atomic_set(&pblk->rl.free_user_blocks, nr_free_blks);
}
static int pblk_lines_alloc_metadata(struct pblk *pblk)
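
The pblk_set_provision() hunk replaces the hard-coded 20% over-provisioning with a device-supplied value (or PBLK_DEFAULT_OP) and subtracts metadata blocks from the exposed capacity. A worked standalone example of that arithmetic, with made-up geometry numbers:

#include <stdio.h>

int main(void)
{
	long nr_free_blks = 1000, sec_per_chk = 4096;
	int op = 11;		/* percent kept as over-provisioning */
	long sec_meta = 24576;	/* smeta + emeta sectors over all free lines */
	long provisioned, op_blks, blk_meta, capacity;

	provisioned = nr_free_blks * (100 - op) / 100;	/* 890 blocks */
	op_blks = nr_free_blks - provisioned;		/* 110 blocks held back */
	blk_meta = (sec_meta + sec_per_chk - 1) / sec_per_chk; /* DIV_ROUND_UP */
	capacity = (provisioned - blk_meta) * sec_per_chk;

	printf("provisioned=%ld op_blks=%ld capacity=%ld sectors\n",
	       provisioned, op_blks, capacity);
	return 0;
}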
@@ -683,7 +702,7 @@ static int pblk_lines_init(struct pblk *pblk)
int i, ret;
pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
- max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
+ max_write_ppas = pblk->min_write_pgs * geo->all_luns;
pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
max_write_ppas : nvm_max_phys_sects(dev);
pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
@@ -693,26 +712,26 @@ static int pblk_lines_init(struct pblk *pblk)
return -EINVAL;
}
- div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
+ div_u64_rem(geo->sec_per_chk, pblk->min_write_pgs, &mod);
if (mod) {
pr_err("pblk: bad configuration of sectors/pages\n");
return -EINVAL;
}
- l_mg->nr_lines = geo->blks_per_lun;
+ l_mg->nr_lines = geo->nr_chks;
l_mg->log_line = l_mg->data_line = NULL;
l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
l_mg->nr_free_lines = 0;
bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);
- lm->sec_per_line = geo->sec_per_blk * geo->nr_luns;
- lm->blk_per_line = geo->nr_luns;
- lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
+ lm->sec_per_line = geo->sec_per_chk * geo->all_luns;
+ lm->blk_per_line = geo->all_luns;
+ lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
- lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
+ lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
lm->mid_thrs = lm->sec_per_line / 2;
lm->high_thrs = lm->sec_per_line / 4;
- lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs;
+ lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs;
/* Calculate necessary pages for smeta. See comment over struct
* line_smeta definition
@@ -742,12 +761,12 @@ add_emeta_page:
goto add_emeta_page;
}
- lm->emeta_bb = geo->nr_luns > i ? geo->nr_luns - i : 0;
+ lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0;
lm->min_blk_line = 1;
- if (geo->nr_luns > 1)
+ if (geo->all_luns > 1)
lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
- lm->emeta_sec[0], geo->sec_per_blk);
+ lm->emeta_sec[0], geo->sec_per_chk);
if (lm->min_blk_line > lm->blk_per_line) {
pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
@@ -772,7 +791,7 @@ add_emeta_page:
goto fail_free_bb_template;
}
- bb_distance = (geo->nr_luns) * geo->sec_per_pl;
+ bb_distance = (geo->all_luns) * geo->sec_per_pl;
for (i = 0; i < lm->sec_per_line; i += bb_distance)
bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);
@@ -844,7 +863,7 @@ add_emeta_page:
pblk_set_provision(pblk, nr_free_blks);
/* Cleanup per-LUN bad block lists - managed within lines on run-time */
- for (i = 0; i < geo->nr_luns; i++)
+ for (i = 0; i < geo->all_luns; i++)
kfree(pblk->luns[i].bb_list);
return 0;
@@ -858,7 +877,7 @@ fail_free_bb_template:
fail_free_meta:
pblk_line_meta_free(pblk);
fail:
- for (i = 0; i < geo->nr_luns; i++)
+ for (i = 0; i < geo->all_luns; i++)
kfree(pblk->luns[i].bb_list);
return ret;
@@ -866,15 +885,19 @@ fail:
static int pblk_writer_init(struct pblk *pblk)
{
- timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
- mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
-
pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
if (IS_ERR(pblk->writer_ts)) {
- pr_err("pblk: could not allocate writer kthread\n");
- return PTR_ERR(pblk->writer_ts);
+ int err = PTR_ERR(pblk->writer_ts);
+
+ if (err != -EINTR)
+ pr_err("pblk: could not allocate writer kthread (%d)\n",
+ err);
+ return err;
}
+ timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
+ mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
+
return 0;
}
@@ -910,7 +933,6 @@ static void pblk_tear_down(struct pblk *pblk)
pblk_pipeline_stop(pblk);
pblk_writer_stop(pblk);
pblk_rb_sync_l2p(&pblk->rwb);
- pblk_rwb_free(pblk);
pblk_rl_free(&pblk->rl);
pr_debug("pblk: consistent tear down\n");
@@ -1025,7 +1047,8 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
ret = pblk_writer_init(pblk);
if (ret) {
- pr_err("pblk: could not initialize write thread\n");
+ if (ret != -EINTR)
+ pr_err("pblk: could not initialize write thread\n");
goto fail_free_lines;
}
@@ -1041,13 +1064,14 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
blk_queue_write_cache(tqueue, true, false);
- tqueue->limits.discard_granularity = geo->pgs_per_blk * geo->pfpg_size;
+ tqueue->limits.discard_granularity = geo->sec_per_chk * geo->sec_size;
tqueue->limits.discard_alignment = 0;
blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);
- pr_info("pblk init: luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
- geo->nr_luns, pblk->l_mg.nr_lines,
+ pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
+ tdisk->disk_name,
+ geo->all_luns, pblk->l_mg.nr_lines,
(unsigned long long)pblk->rl.nr_secs,
pblk->rwb.nr_entries);
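
pblk_writer_init() above is reordered so the periodic write timer is armed only after the writer kthread was created, which keeps the error path from having to tear down a live timer. A small pthread-based model of the same ordering (userspace analogy, not the kthread API):

#include <pthread.h>
#include <stdio.h>

static void *writer(void *arg)
{
	(void)arg;
	return NULL;	/* write-thread body elided */
}

/* Create the worker first; arm the periodic kick only once creation
 * succeeded, so the error path never owns a live timer. */
static int writer_init(pthread_t *ts)
{
	int err = pthread_create(ts, NULL, writer, NULL);

	if (err) {
		fprintf(stderr, "could not create writer thread (%d)\n", err);
		return err;
	}
	/* arm_timer() would go here, after the thread exists */
	return 0;
}

int main(void)
{
	pthread_t ts;

	if (!writer_init(&ts))
		pthread_join(ts, NULL);
	return 0;
}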
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
index 6f3ecde..7445e64 100644
--- a/drivers/lightnvm/pblk-map.c
+++ b/drivers/lightnvm/pblk-map.c
@@ -146,7 +146,7 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
return;
/* Erase blocks that are bad in this line but might not be in next */
- if (unlikely(ppa_empty(*erase_ppa)) &&
+ if (unlikely(pblk_ppa_empty(*erase_ppa)) &&
bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
int bit = -1;
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index b8f78e4..ec8fc31 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -54,7 +54,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
rb->seg_size = (1 << power_seg_sz);
rb->nr_entries = (1 << power_size);
rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
- rb->sync_point = EMPTY_ENTRY;
+ rb->flush_point = EMPTY_ENTRY;
spin_lock_init(&rb->w_lock);
spin_lock_init(&rb->s_lock);
@@ -112,7 +112,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
up_write(&pblk_rb_lock);
#ifdef CONFIG_NVM_DEBUG
- atomic_set(&rb->inflight_sync_point, 0);
+ atomic_set(&rb->inflight_flush_point, 0);
#endif
/*
@@ -226,7 +226,7 @@ static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
entry->cacheline);
- line = &pblk->lines[pblk_tgt_ppa_to_line(w_ctx->ppa)];
+ line = &pblk->lines[pblk_ppa_to_line(w_ctx->ppa)];
kref_put(&line->ref, pblk_line_put);
clean_wctx(w_ctx);
rb->l2p_update = (rb->l2p_update + 1) & (rb->nr_entries - 1);
@@ -349,35 +349,35 @@ void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
smp_store_release(&entry->w_ctx.flags, flags);
}
-static int pblk_rb_sync_point_set(struct pblk_rb *rb, struct bio *bio,
+static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio,
unsigned int pos)
{
struct pblk_rb_entry *entry;
- unsigned int subm, sync_point;
+ unsigned int sync, flush_point;
- subm = READ_ONCE(rb->subm);
+ sync = READ_ONCE(rb->sync);
+
+ if (pos == sync)
+ return 0;
#ifdef CONFIG_NVM_DEBUG
- atomic_inc(&rb->inflight_sync_point);
+ atomic_inc(&rb->inflight_flush_point);
#endif
- if (pos == subm)
- return 0;
+ flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
+ entry = &rb->entries[flush_point];
- sync_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
- entry = &rb->entries[sync_point];
+ pblk_rb_sync_init(rb, NULL);
- /* Protect syncs */
- smp_store_release(&rb->sync_point, sync_point);
+ /* Protect flush points */
+ smp_store_release(&rb->flush_point, flush_point);
- if (!bio)
- return 0;
+ if (bio)
+ bio_list_add(&entry->w_ctx.bios, bio);
- spin_lock_irq(&rb->s_lock);
- bio_list_add(&entry->w_ctx.bios, bio);
- spin_unlock_irq(&rb->s_lock);
+ pblk_rb_sync_end(rb, NULL);
- return 1;
+ return bio ? 1 : 0;
}
static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
@@ -416,7 +416,7 @@ void pblk_rb_flush(struct pblk_rb *rb)
struct pblk *pblk = container_of(rb, struct pblk, rwb);
unsigned int mem = READ_ONCE(rb->mem);
- if (pblk_rb_sync_point_set(rb, NULL, mem))
+ if (pblk_rb_flush_point_set(rb, NULL, mem))
return;
pblk_write_should_kick(pblk);
@@ -440,7 +440,7 @@ static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
#ifdef CONFIG_NVM_DEBUG
atomic_long_inc(&pblk->nr_flush);
#endif
- if (pblk_rb_sync_point_set(&pblk->rwb, bio, mem))
+ if (pblk_rb_flush_point_set(&pblk->rwb, bio, mem))
*io_ret = NVM_IO_OK;
}
@@ -606,21 +606,6 @@ try:
return NVM_IO_ERR;
}
- if (flags & PBLK_FLUSH_ENTRY) {
- unsigned int sync_point;
-
- sync_point = READ_ONCE(rb->sync_point);
- if (sync_point == pos) {
- /* Protect syncs */
- smp_store_release(&rb->sync_point, EMPTY_ENTRY);
- }
-
- flags &= ~PBLK_FLUSH_ENTRY;
-#ifdef CONFIG_NVM_DEBUG
- atomic_dec(&rb->inflight_sync_point);
-#endif
- }
-
flags &= ~PBLK_WRITTEN_DATA;
flags |= PBLK_SUBMITTED_ENTRY;
@@ -730,15 +715,24 @@ void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags)
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
{
- unsigned int sync;
- unsigned int i;
-
+ unsigned int sync, flush_point;
lockdep_assert_held(&rb->s_lock);
sync = READ_ONCE(rb->sync);
+ flush_point = READ_ONCE(rb->flush_point);
- for (i = 0; i < nr_entries; i++)
- sync = (sync + 1) & (rb->nr_entries - 1);
+ if (flush_point != EMPTY_ENTRY) {
+ unsigned int secs_to_flush;
+
+ secs_to_flush = pblk_rb_ring_count(flush_point, sync,
+ rb->nr_entries);
+ if (secs_to_flush < nr_entries) {
+ /* Protect flush points */
+ smp_store_release(&rb->flush_point, EMPTY_ENTRY);
+ }
+ }
+
+ sync = (sync + nr_entries) & (rb->nr_entries - 1);
/* Protect from counts */
smp_store_release(&rb->sync, sync);
@@ -746,22 +740,27 @@ unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
return sync;
}
-unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb)
+/* Calculate how many sectors to submit up to the current flush point. */
+unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb)
{
- unsigned int subm, sync_point;
- unsigned int count;
+ unsigned int subm, sync, flush_point;
+ unsigned int submitted, to_flush;
- /* Protect syncs */
- sync_point = smp_load_acquire(&rb->sync_point);
- if (sync_point == EMPTY_ENTRY)
+ /* Protect flush points */
+ flush_point = smp_load_acquire(&rb->flush_point);
+ if (flush_point == EMPTY_ENTRY)
return 0;
+ /* Protect syncs */
+ sync = smp_load_acquire(&rb->sync);
+
subm = READ_ONCE(rb->subm);
+ submitted = pblk_rb_ring_count(subm, sync, rb->nr_entries);
/* The sync point itself counts as a sector to sync */
- count = pblk_rb_ring_count(sync_point, subm, rb->nr_entries) + 1;
+ to_flush = pblk_rb_ring_count(flush_point, sync, rb->nr_entries) + 1;
- return count;
+ return (submitted < to_flush) ? (to_flush - submitted) : 0;
}
/*
@@ -801,7 +800,7 @@ int pblk_rb_tear_down_check(struct pblk_rb *rb)
if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
(rb->sync == rb->l2p_update) &&
- (rb->sync_point == EMPTY_ENTRY)) {
+ (rb->flush_point == EMPTY_ENTRY)) {
goto out;
}
@@ -848,7 +847,7 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
queued_entries++;
spin_unlock_irq(&rb->s_lock);
- if (rb->sync_point != EMPTY_ENTRY)
+ if (rb->flush_point != EMPTY_ENTRY)
offset = scnprintf(buf, PAGE_SIZE,
"%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n",
rb->nr_entries,
@@ -857,14 +856,14 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
rb->sync,
rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
- atomic_read(&rb->inflight_sync_point),
+ atomic_read(&rb->inflight_flush_point),
#else
0,
#endif
- rb->sync_point,
+ rb->flush_point,
pblk_rb_read_count(rb),
pblk_rb_space(rb),
- pblk_rb_sync_point_count(rb),
+ pblk_rb_flush_point_count(rb),
queued_entries);
else
offset = scnprintf(buf, PAGE_SIZE,
@@ -875,13 +874,13 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
rb->sync,
rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
- atomic_read(&rb->inflight_sync_point),
+ atomic_read(&rb->inflight_flush_point),
#else
0,
#endif
pblk_rb_read_count(rb),
pblk_rb_space(rb),
- pblk_rb_sync_point_count(rb),
+ pblk_rb_flush_point_count(rb),
queued_entries);
return offset;
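
The ring-buffer changes measure the flush point against the sync pointer rather than the submission pointer, and pblk_rb_flush_point_count() returns only the sectors that still need submitting. A standalone model of that power-of-two ring arithmetic, with made-up pointer values:

#include <stdio.h>

/* Power-of-two ring distance, as in the kernel's CIRC_CNT(). */
static unsigned int ring_count(unsigned int head, unsigned int tail,
			       unsigned int size)
{
	return (head - tail) & (size - 1);
}

int main(void)
{
	unsigned int size = 16;
	unsigned int sync = 2, subm = 5, flush_point = 9;
	unsigned int submitted, to_flush;

	/* Entries already dispatched but not yet persisted. */
	submitted = ring_count(subm, sync, size);
	/* Entries needed to cover the flush point (itself included). */
	to_flush = ring_count(flush_point, sync, size) + 1;

	printf("still to submit: %u\n",
	       submitted < to_flush ? to_flush - submitted : 0);
	return 0;
}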
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index ca79d8f..2f76128 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -141,7 +141,7 @@ static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
struct ppa_addr ppa = ppa_list[i];
struct pblk_line *line;
- line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
+ line = &pblk->lines[pblk_ppa_to_line(ppa)];
kref_put(&line->ref, pblk_line_put_wq);
}
}
@@ -158,8 +158,12 @@ static void pblk_end_user_read(struct bio *bio)
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
bool put_line)
{
+ struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
struct bio *bio = rqd->bio;
+ unsigned long start_time = r_ctx->start_time;
+
+ generic_end_io_acct(dev->q, READ, &pblk->disk->part0, start_time);
if (rqd->error)
pblk_log_read_err(pblk, rqd);
@@ -193,9 +197,9 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
__pblk_end_io_read(pblk, rqd, true);
}
-static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
- unsigned int bio_init_idx,
- unsigned long *read_bitmap)
+static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
+ unsigned int bio_init_idx,
+ unsigned long *read_bitmap)
{
struct bio *new_bio, *bio = rqd->bio;
struct pblk_sec_meta *meta_list = rqd->meta_list;
@@ -270,7 +274,7 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
i = 0;
hole = find_first_zero_bit(read_bitmap, nr_secs);
do {
- int line_id = pblk_dev_ppa_to_line(rqd->ppa_list[i]);
+ int line_id = pblk_ppa_to_line(rqd->ppa_list[i]);
struct pblk_line *line = &pblk->lines[line_id];
kref_put(&line->ref, pblk_line_put);
@@ -306,6 +310,8 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
return NVM_IO_OK;
err:
+ pr_err("pblk: failed to perform partial read\n");
+
/* Free allocated pages in new bio */
pblk_bio_free_pages(pblk, bio, 0, new_bio->bi_vcnt);
__pblk_end_io_read(pblk, rqd, false);
@@ -357,6 +363,7 @@ retry:
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
struct nvm_tgt_dev *dev = pblk->dev;
+ struct request_queue *q = dev->q;
sector_t blba = pblk_get_lba(bio);
unsigned int nr_secs = pblk_get_secs(bio);
struct pblk_g_ctx *r_ctx;
@@ -372,6 +379,8 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
return NVM_IO_ERR;
}
+ generic_start_io_acct(q, READ, bio_sectors(bio), &pblk->disk->part0);
+
bitmap_zero(&read_bitmap, nr_secs);
rqd = pblk_alloc_rqd(pblk, PBLK_READ);
@@ -383,6 +392,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
rqd->end_io = pblk_end_io_read;
r_ctx = nvm_rq_to_pdu(rqd);
+ r_ctx->start_time = jiffies;
r_ctx->lba = blba;
/* Save the index for this bio's start. This is needed in case
@@ -422,7 +432,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
if (!int_bio) {
pr_err("pblk: could not clone read bio\n");
- return NVM_IO_ERR;
+ goto fail_end_io;
}
rqd->bio = int_bio;
@@ -433,7 +443,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
pr_err("pblk: read IO submission failed\n");
if (int_bio)
bio_put(int_bio);
- return ret;
+ goto fail_end_io;
}
return NVM_IO_OK;
@@ -442,17 +452,14 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
/* The read bio request could be partially filled by the write buffer,
* but there are some holes that need to be read from the drive.
*/
- ret = pblk_fill_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
- if (ret) {
- pr_err("pblk: failed to perform partial read\n");
- return ret;
- }
-
- return NVM_IO_OK;
+ return pblk_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
fail_rqd_free:
pblk_free_rqd(pblk, rqd, PBLK_READ);
return ret;
+fail_end_io:
+ __pblk_end_io_read(pblk, rqd, false);
+ return ret;
}
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
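
The read path above stamps each request with a start time and reports it to the block-layer accounting on completion via generic_start_io_acct()/generic_end_io_acct(). A userspace analog of the same stamp-at-submit, account-at-complete pairing:

#include <stdio.h>
#include <time.h>

struct read_ctx {
	struct timespec start;	/* plays the role of r_ctx->start_time */
};

static void start_io_acct(struct read_ctx *ctx)
{
	clock_gettime(CLOCK_MONOTONIC, &ctx->start);
}

static void end_io_acct(const struct read_ctx *ctx)
{
	struct timespec now;
	long us;

	clock_gettime(CLOCK_MONOTONIC, &now);
	us = (now.tv_sec - ctx->start.tv_sec) * 1000000L +
	     (now.tv_nsec - ctx->start.tv_nsec) / 1000L;
	printf("read completed in %ld us\n", us);
}

int main(void)
{
	struct read_ctx ctx;

	start_io_acct(&ctx);	/* at submit time */
	end_io_acct(&ctx);	/* at completion time */
	return 0;
}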
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index eadb3eb..1d5e961 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -111,18 +111,18 @@ int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
return 0;
}
-__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta_buf)
+int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
u32 crc;
crc = pblk_calc_emeta_crc(pblk, emeta_buf);
if (le32_to_cpu(emeta_buf->crc) != crc)
- return NULL;
+ return 1;
if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
- return NULL;
+ return 1;
- return emeta_to_lbas(pblk, emeta_buf);
+ return 0;
}
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
@@ -137,7 +137,7 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
u64 nr_valid_lbas, nr_lbas = 0;
u64 i;
- lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
+ lba_list = emeta_to_lbas(pblk, emeta_buf);
if (!lba_list)
return 1;
@@ -149,7 +149,7 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
struct ppa_addr ppa;
int pos;
- ppa = addr_to_pblk_ppa(pblk, i, line->id);
+ ppa = addr_to_gen_ppa(pblk, i, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
/* Do not update bad blocks */
@@ -188,7 +188,7 @@ static int pblk_calc_sec_in_line(struct pblk *pblk, struct pblk_line *line)
int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
return lm->sec_per_line - lm->smeta_sec - lm->emeta_sec[0] -
- nr_bb * geo->sec_per_blk;
+ nr_bb * geo->sec_per_chk;
}
struct pblk_recov_alloc {
@@ -263,12 +263,12 @@ next_read_rq:
int pos;
ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
r_ptr_int += pblk->min_write_pgs;
ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
for (j = 0; j < pblk->min_write_pgs; j++, i++, r_ptr_int++)
@@ -288,7 +288,7 @@ next_read_rq:
/* At this point, the read should not fail. If it does, it is a problem
* we cannot recover from here. Need FTL log.
*/
- if (rqd->error) {
+ if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
pr_err("pblk: L2P recovery failed (%d)\n", rqd->error);
return -EINTR;
}
@@ -411,12 +411,12 @@ next_pad_rq:
int pos;
w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
- ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
+ ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
w_ptr += pblk->min_write_pgs;
- ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
+ ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
}
@@ -541,12 +541,12 @@ next_rq:
w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
w_ptr += pblk->min_write_pgs;
ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++)
@@ -672,12 +672,12 @@ next_rq:
paddr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
ppa = addr_to_gen_ppa(pblk, paddr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
paddr += pblk->min_write_pgs;
ppa = addr_to_gen_ppa(pblk, paddr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
for (j = 0; j < pblk->min_write_pgs; j++, i++, paddr++)
@@ -817,7 +817,7 @@ static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
while (emeta_secs) {
emeta_start--;
- ppa = addr_to_pblk_ppa(pblk, emeta_start, line->id);
+ ppa = addr_to_gen_ppa(pblk, emeta_start, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
if (!test_bit(pos, line->blk_bitmap))
emeta_secs--;
@@ -938,6 +938,11 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
goto next;
}
+ if (pblk_recov_check_emeta(pblk, line->emeta->buf)) {
+ pblk_recov_l2p_from_oob(pblk, line);
+ goto next;
+ }
+
if (pblk_recov_l2p_from_emeta(pblk, line))
pblk_recov_l2p_from_oob(pblk, line);
@@ -984,10 +989,8 @@ next:
}
spin_unlock(&l_mg->free_lock);
- if (is_next) {
+ if (is_next)
pblk_line_erase(pblk, l_mg->data_next);
- pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
- }
out:
if (found_lines != recovered_lines)
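
Recovery above gains an explicit fallback: a line whose emeta fails the consistency check is rebuilt from out-of-band data instead. A control-flow sketch of that shape (the three functions below are hypothetical stand-ins):

#include <stdio.h>

/* Hypothetical stand-ins for the two recovery paths. */
static int recov_check_emeta(int line)
{
	return line & 1;	/* pretend odd lines have corrupt emeta */
}

static int recov_l2p_from_emeta(int line)
{
	printf("line %d: recovered from emeta\n", line);
	return 0;
}

static void recov_l2p_from_oob(int line)
{
	printf("line %d: recovered from OOB scan\n", line);
}

/* Same shape as the hunk above: a line whose emeta fails the
 * consistency check (or later fails to parse) falls back to OOB. */
static void recover_line(int line)
{
	if (recov_check_emeta(line)) {
		recov_l2p_from_oob(line);
		return;
	}
	if (recov_l2p_from_emeta(line))
		recov_l2p_from_oob(line);
}

int main(void)
{
	recover_line(0);
	recover_line(1);
	return 0;
}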
diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c
index dacc719..0d457b1 100644
--- a/drivers/lightnvm/pblk-rl.c
+++ b/drivers/lightnvm/pblk-rl.c
@@ -89,17 +89,15 @@ unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
return atomic_read(&rl->free_blocks);
}
-/*
- * We check for (i) the number of free blocks in the current LUN and (ii) the
- * total number of free blocks in the pblk instance. This is to even out the
- * number of free blocks on each LUN when GC kicks in.
- *
- * Only the total number of free blocks is used to configure the rate limiter.
- */
-void pblk_rl_update_rates(struct pblk_rl *rl)
+unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl)
+{
+ return atomic_read(&rl->free_user_blocks);
+}
+
+static void __pblk_rl_update_rates(struct pblk_rl *rl,
+ unsigned long free_blocks)
{
struct pblk *pblk = container_of(rl, struct pblk, rl);
- unsigned long free_blocks = pblk_rl_nr_free_blks(rl);
int max = rl->rb_budget;
if (free_blocks >= rl->high) {
@@ -132,20 +130,37 @@ void pblk_rl_update_rates(struct pblk_rl *rl)
pblk_gc_should_stop(pblk);
}
+void pblk_rl_update_rates(struct pblk_rl *rl)
+{
+ __pblk_rl_update_rates(rl, pblk_rl_nr_user_free_blks(rl));
+}
+
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
{
int blk_in_line = atomic_read(&line->blk_in_line);
+ int free_blocks;
atomic_add(blk_in_line, &rl->free_blocks);
- pblk_rl_update_rates(rl);
+ free_blocks = atomic_add_return(blk_in_line, &rl->free_user_blocks);
+
+ __pblk_rl_update_rates(rl, free_blocks);
}
-void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line)
+void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
+ bool used)
{
int blk_in_line = atomic_read(&line->blk_in_line);
+ int free_blocks;
atomic_sub(blk_in_line, &rl->free_blocks);
- pblk_rl_update_rates(rl);
+
+ if (used)
+ free_blocks = atomic_sub_return(blk_in_line,
+ &rl->free_user_blocks);
+ else
+ free_blocks = atomic_read(&rl->free_user_blocks);
+
+ __pblk_rl_update_rates(rl, free_blocks);
}
int pblk_rl_high_thrs(struct pblk_rl *rl)
@@ -174,16 +189,21 @@ void pblk_rl_free(struct pblk_rl *rl)
void pblk_rl_init(struct pblk_rl *rl, int budget)
{
struct pblk *pblk = container_of(rl, struct pblk, rl);
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
int min_blocks = lm->blk_per_line * PBLK_GC_RSV_LINE;
+ int sec_meta, blk_meta;
+
unsigned int rb_windows;
- rl->high = rl->total_blocks / PBLK_USER_HIGH_THRS;
- rl->high_pw = get_count_order(rl->high);
+ /* Consider sectors used for metadata */
+ sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
+ blk_meta = DIV_ROUND_UP(sec_meta, geo->sec_per_chk);
- rl->low = rl->total_blocks / PBLK_USER_LOW_THRS;
- if (rl->low < min_blocks)
- rl->low = min_blocks;
+ rl->high = pblk->op_blks - blk_meta - lm->blk_per_line;
+ rl->high_pw = get_count_order(rl->high);
rl->rsv_blocks = min_blocks;
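
The rate-limiter init above derives its high watermark from the over-provisioning budget (OP blocks minus metadata blocks minus one line's worth) instead of a fixed fraction of all blocks. A worked standalone example, with an analog of get_count_order() and illustrative numbers:

#include <stdio.h>

/* Analog of get_count_order(): smallest order with 2^order >= n. */
static int count_order(unsigned int n)
{
	int order = 0;

	while ((1U << order) < n)
		order++;
	return order;
}

int main(void)
{
	int op_blks = 110;	/* blocks held back for over-provisioning */
	int blk_meta = 6;	/* blocks consumed by smeta/emeta */
	int blk_per_line = 4;	/* one full line kept in reserve */
	int high = op_blks - blk_meta - blk_per_line;

	printf("high=%d high_pw=%d\n", high, count_order(high));
	return 0;
}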
diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
index cd49e88..620bab8 100644
--- a/drivers/lightnvm/pblk-sysfs.c
+++ b/drivers/lightnvm/pblk-sysfs.c
@@ -28,7 +28,7 @@ static ssize_t pblk_sysfs_luns_show(struct pblk *pblk, char *page)
ssize_t sz = 0;
int i;
- for (i = 0; i < geo->nr_luns; i++) {
+ for (i = 0; i < geo->all_luns; i++) {
int active = 1;
rlun = &pblk->luns[i];
@@ -49,11 +49,12 @@ static ssize_t pblk_sysfs_luns_show(struct pblk *pblk, char *page)
static ssize_t pblk_sysfs_rate_limiter(struct pblk *pblk, char *page)
{
- int free_blocks, total_blocks;
+ int free_blocks, free_user_blocks, total_blocks;
int rb_user_max, rb_user_cnt;
int rb_gc_max, rb_gc_cnt, rb_budget, rb_state;
- free_blocks = atomic_read(&pblk->rl.free_blocks);
+ free_blocks = pblk_rl_nr_free_blks(&pblk->rl);
+ free_user_blocks = pblk_rl_nr_user_free_blks(&pblk->rl);
rb_user_max = pblk->rl.rb_user_max;
rb_user_cnt = atomic_read(&pblk->rl.rb_user_cnt);
rb_gc_max = pblk->rl.rb_gc_max;
@@ -64,16 +65,16 @@ static ssize_t pblk_sysfs_rate_limiter(struct pblk *pblk, char *page)
total_blocks = pblk->rl.total_blocks;
return snprintf(page, PAGE_SIZE,
- "u:%u/%u,gc:%u/%u(%u/%u)(stop:<%u,full:>%u,free:%d/%d)-%d\n",
+ "u:%u/%u,gc:%u/%u(%u)(stop:<%u,full:>%u,free:%d/%d/%d)-%d\n",
rb_user_cnt,
rb_user_max,
rb_gc_cnt,
rb_gc_max,
rb_state,
rb_budget,
- pblk->rl.low,
pblk->rl.high,
free_blocks,
+ free_user_blocks,
total_blocks,
READ_ONCE(pblk->rl.rb_user_active));
}
@@ -238,7 +239,7 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
sz = snprintf(page, PAGE_SIZE - sz,
"line: nluns:%d, nblks:%d, nsecs:%d\n",
- geo->nr_luns, lm->blk_per_line, lm->sec_per_line);
+ geo->all_luns, lm->blk_per_line, lm->sec_per_line);
sz += snprintf(page + sz, PAGE_SIZE - sz,
"lines:d:%d,l:%d-f:%d,m:%d/%d,c:%d,b:%d,co:%d(d:%d,l:%d)t:%d\n",
@@ -287,7 +288,7 @@ static ssize_t pblk_sysfs_lines_info(struct pblk *pblk, char *page)
"blk_line:%d, sec_line:%d, sec_blk:%d\n",
lm->blk_per_line,
lm->sec_per_line,
- geo->sec_per_blk);
+ geo->sec_per_chk);
return sz;
}
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index 6c1cafa..aae86ed 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -21,13 +21,28 @@ static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
struct pblk_c_ctx *c_ctx)
{
struct bio *original_bio;
+ struct pblk_rb *rwb = &pblk->rwb;
unsigned long ret;
int i;
for (i = 0; i < c_ctx->nr_valid; i++) {
struct pblk_w_ctx *w_ctx;
+ int pos = c_ctx->sentry + i;
+ int flags;
+
+ w_ctx = pblk_rb_w_ctx(rwb, pos);
+ flags = READ_ONCE(w_ctx->flags);
+
+ if (flags & PBLK_FLUSH_ENTRY) {
+ flags &= ~PBLK_FLUSH_ENTRY;
+ /* Release flags on context. Protect from writes */
+ smp_store_release(&w_ctx->flags, flags);
+
+#ifdef CONFIG_NVM_DEBUG
+ atomic_dec(&rwb->inflight_flush_point);
+#endif
+ }
- w_ctx = pblk_rb_w_ctx(&pblk->rwb, c_ctx->sentry + i);
while ((original_bio = bio_list_pop(&w_ctx->bios)))
bio_endio(original_bio);
}
@@ -439,7 +454,7 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
struct pblk_line *meta_line;
int err;
- ppa_set_empty(&erase_ppa);
+ pblk_ppa_set_empty(&erase_ppa);
/* Assign lbas to ppas and populate request structure */
err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
@@ -457,7 +472,7 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
return NVM_IO_ERR;
}
- if (!ppa_empty(erase_ppa)) {
+ if (!pblk_ppa_empty(erase_ppa)) {
/* Submit erase for next data line */
if (pblk_blk_erase_async(pblk, erase_ppa)) {
struct pblk_line *e_line = pblk_line_get_erase(pblk);
@@ -508,7 +523,7 @@ static int pblk_submit_write(struct pblk *pblk)
if (!secs_avail)
return 1;
- secs_to_flush = pblk_rb_sync_point_count(&pblk->rwb);
+ secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
return 1;
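
With the flush point now tracked against sync, the write completion path above becomes the place where PBLK_FLUSH_ENTRY is cleared, using a release store so readers of the flags see the update. A C11-atomics model of that handoff (userspace memory model, not the kernel's smp_store_release()):

#include <stdatomic.h>
#include <stdio.h>

#define FLUSH_ENTRY (1 << 0)

/* Per-entry flags word; C11 release/acquire models the kernel's
 * smp_store_release()/smp_load_acquire() pairing, it is not that API. */
static _Atomic int w_flags = FLUSH_ENTRY;

static void complete_entry(void)
{
	int flags = atomic_load_explicit(&w_flags, memory_order_acquire);

	if (flags & FLUSH_ENTRY) {
		flags &= ~FLUSH_ENTRY;
		/* Publish the cleared flag before anyone re-reads it. */
		atomic_store_explicit(&w_flags, flags, memory_order_release);
	}
}

int main(void)
{
	complete_entry();
	printf("flags=%d\n", atomic_load(&w_flags));
	return 0;
}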
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 59a64d4..8c357fb 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -51,17 +51,16 @@
#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)
-#define pblk_for_each_lun(pblk, rlun, i) \
- for ((i) = 0, rlun = &(pblk)->luns[0]; \
- (i) < (pblk)->nr_luns; (i)++, rlun = &(pblk)->luns[(i)])
-
/* Static pool sizes */
#define PBLK_GEN_WS_POOL_SIZE (2)
+#define PBLK_DEFAULT_OP (11)
+
enum {
PBLK_READ = READ,
PBLK_WRITE = WRITE,/* Write from write buffer */
PBLK_WRITE_INT, /* Internal write - no write buffer */
+ PBLK_READ_RECOV, /* Recovery read - errors allowed */
PBLK_ERASE,
};
@@ -114,6 +113,7 @@ struct pblk_c_ctx {
/* read context */
struct pblk_g_ctx {
void *private;
+ unsigned long start_time;
u64 lba;
};
@@ -170,7 +170,7 @@ struct pblk_rb {
* the last submitted entry that has
* been successfully persisted to media
*/
- unsigned int sync_point; /* Sync point - last entry that must be
+ unsigned int flush_point; /* Flush point - last entry that must be
* flushed to the media. Used with
* REQ_FLUSH and REQ_FUA
*/
@@ -193,7 +193,7 @@ struct pblk_rb {
spinlock_t s_lock; /* Sync lock */
#ifdef CONFIG_NVM_DEBUG
- atomic_t inflight_sync_point; /* Not served REQ_FLUSH | REQ_FUA */
+ atomic_t inflight_flush_point; /* Not served REQ_FLUSH | REQ_FUA */
#endif
};
@@ -256,9 +256,6 @@ struct pblk_rl {
unsigned int high; /* Upper threshold for rate limiter (free run -
* user I/O rate limiter
*/
- unsigned int low; /* Lower threshold for rate limiter (user I/O
- * rate limiter - stall)
- */
unsigned int high_pw; /* High rounded up as a power of 2 */
#define PBLK_USER_HIGH_THRS 8 /* Begin write limit at 12% available blks */
@@ -292,7 +289,9 @@ struct pblk_rl {
unsigned long long nr_secs;
unsigned long total_blocks;
- atomic_t free_blocks;
+
+ atomic_t free_blocks; /* Total number of free blocks (+ OP) */
+ atomic_t free_user_blocks; /* Number of user free blocks (no OP) */
};
#define PBLK_LINE_EMPTY (~0U)
@@ -583,7 +582,9 @@ struct pblk {
*/
sector_t capacity; /* Device capacity when bad blocks are subtracted */
- int over_pct; /* Percentage of device used for over-provisioning */
+
+ int op; /* Percentage of device used for over-provisioning */
+ int op_blks; /* Number of blocks used for over-provisioning */
/* pblk provisioning values. Used by rate limiter */
struct pblk_rl rl;
@@ -691,7 +692,7 @@ unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
struct ppa_addr *ppa);
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
-unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb);
+unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb);
unsigned int pblk_rb_read_count(struct pblk_rb *rb);
unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
@@ -812,7 +813,7 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
void pblk_submit_rec(struct work_struct *work);
struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
int pblk_recov_pad(struct pblk *pblk);
-__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta);
+int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta);
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
struct pblk_rec_ctx *recovery, u64 *comp_bits,
unsigned int comp);
@@ -843,6 +844,7 @@ void pblk_rl_free(struct pblk_rl *rl);
void pblk_rl_update_rates(struct pblk_rl *rl);
int pblk_rl_high_thrs(struct pblk_rl *rl);
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
+unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl);
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
@@ -851,7 +853,8 @@ void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
int pblk_rl_max_io(struct pblk_rl *rl);
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
-void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line);
+void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
+ bool used);
int pblk_rl_is_limit(struct pblk_rl *rl);
/*
@@ -907,28 +910,47 @@ static inline int pblk_pad_distance(struct pblk *pblk)
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- return NVM_MEM_PAGE_WRITE * geo->nr_luns * geo->sec_per_pl;
+ return NVM_MEM_PAGE_WRITE * geo->all_luns * geo->sec_per_pl;
}
-static inline int pblk_dev_ppa_to_line(struct ppa_addr p)
+static inline int pblk_ppa_to_line(struct ppa_addr p)
{
return p.g.blk;
}
-static inline int pblk_tgt_ppa_to_line(struct ppa_addr p)
+static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
- return p.g.blk;
+ return p.g.lun * geo->nr_chnls + p.g.ch;
}
-static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
+static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
+ u64 line_id)
{
- return p.g.lun * geo->nr_chnls + p.g.ch;
+ struct ppa_addr ppa;
+
+ ppa.ppa = 0;
+ ppa.g.blk = line_id;
+ ppa.g.pg = (paddr & pblk->ppaf.pg_mask) >> pblk->ppaf.pg_offset;
+ ppa.g.lun = (paddr & pblk->ppaf.lun_mask) >> pblk->ppaf.lun_offset;
+ ppa.g.ch = (paddr & pblk->ppaf.ch_mask) >> pblk->ppaf.ch_offset;
+ ppa.g.pl = (paddr & pblk->ppaf.pln_mask) >> pblk->ppaf.pln_offset;
+ ppa.g.sec = (paddr & pblk->ppaf.sec_mask) >> pblk->ppaf.sec_offset;
+
+ return ppa;
}
-/* A block within a line corresponds to the lun */
-static inline int pblk_dev_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
+static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
+ struct ppa_addr p)
{
- return p.g.lun * geo->nr_chnls + p.g.ch;
+ u64 paddr;
+
+ paddr = (u64)p.g.pg << pblk->ppaf.pg_offset;
+ paddr |= (u64)p.g.lun << pblk->ppaf.lun_offset;
+ paddr |= (u64)p.g.ch << pblk->ppaf.ch_offset;
+ paddr |= (u64)p.g.pl << pblk->ppaf.pln_offset;
+ paddr |= (u64)p.g.sec << pblk->ppaf.sec_offset;
+
+ return paddr;
}
static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
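
addr_to_gen_ppa() and pblk_dev_ppa_to_line_addr() above are inverse mask-and-shift transforms between a line-relative address and a generic PPA. A simplified standalone round trip with made-up field widths (pblk derives the real masks and offsets from the device geometry in pblk_set_ppaf()):

#include <stdint.h>
#include <stdio.h>

/* Made-up field widths; layout low-to-high: sec | pl | ch | lun | pg. */
#define SEC_BITS 2
#define PL_BITS  1
#define CH_BITS  3
#define LUN_BITS 2

struct ppa {
	unsigned int sec, pl, ch, lun, pg;
};

static struct ppa unpack(uint64_t paddr)
{
	struct ppa p;

	p.sec = paddr & ((1 << SEC_BITS) - 1);	paddr >>= SEC_BITS;
	p.pl  = paddr & ((1 << PL_BITS) - 1);	paddr >>= PL_BITS;
	p.ch  = paddr & ((1 << CH_BITS) - 1);	paddr >>= CH_BITS;
	p.lun = paddr & ((1 << LUN_BITS) - 1);	paddr >>= LUN_BITS;
	p.pg  = (unsigned int)paddr;
	return p;
}

static uint64_t pack(struct ppa p)
{
	return ((uint64_t)p.pg << (SEC_BITS + PL_BITS + CH_BITS + LUN_BITS)) |
	       ((uint64_t)p.lun << (SEC_BITS + PL_BITS + CH_BITS)) |
	       ((uint64_t)p.ch << (SEC_BITS + PL_BITS)) |
	       ((uint64_t)p.pl << SEC_BITS) | p.sec;
}

int main(void)
{
	uint64_t paddr = 0x2d7;
	struct ppa p = unpack(paddr);

	printf("round trip ok: %d\n", pack(p) == paddr);
	return 0;
}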
@@ -960,24 +982,6 @@ static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
return ppa64;
}
-static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
- sector_t lba)
-{
- struct ppa_addr ppa;
-
- if (pblk->ppaf_bitsize < 32) {
- u32 *map = (u32 *)pblk->trans_map;
-
- ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
- } else {
- struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;
-
- ppa = map[lba];
- }
-
- return ppa;
-}
-
static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
{
u32 ppa32 = 0;
@@ -999,33 +1003,36 @@ static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
return ppa32;
}
-static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
- struct ppa_addr ppa)
+static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
+ sector_t lba)
{
+ struct ppa_addr ppa;
+
if (pblk->ppaf_bitsize < 32) {
u32 *map = (u32 *)pblk->trans_map;
- map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
+ ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
} else {
- u64 *map = (u64 *)pblk->trans_map;
+ struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;
- map[lba] = ppa.ppa;
+ ppa = map[lba];
}
+
+ return ppa;
}
-static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
- struct ppa_addr p)
+static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
+ struct ppa_addr ppa)
{
- u64 paddr;
+ if (pblk->ppaf_bitsize < 32) {
+ u32 *map = (u32 *)pblk->trans_map;
- paddr = 0;
- paddr |= (u64)p.g.pg << pblk->ppaf.pg_offset;
- paddr |= (u64)p.g.lun << pblk->ppaf.lun_offset;
- paddr |= (u64)p.g.ch << pblk->ppaf.ch_offset;
- paddr |= (u64)p.g.pl << pblk->ppaf.pln_offset;
- paddr |= (u64)p.g.sec << pblk->ppaf.sec_offset;
+ map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
+ } else {
+ u64 *map = (u64 *)pblk->trans_map;
- return paddr;
+ map[lba] = ppa.ppa;
+ }
}
static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
@@ -1040,10 +1047,7 @@ static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
{
- if (lppa.ppa == rppa.ppa)
- return true;
-
- return false;
+ return (lppa.ppa == rppa.ppa);
}
static inline int pblk_addr_in_cache(struct ppa_addr ppa)
@@ -1066,32 +1070,6 @@ static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
return p;
}
-static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
- u64 line_id)
-{
- struct ppa_addr ppa;
-
- ppa.ppa = 0;
- ppa.g.blk = line_id;
- ppa.g.pg = (paddr & pblk->ppaf.pg_mask) >> pblk->ppaf.pg_offset;
- ppa.g.lun = (paddr & pblk->ppaf.lun_mask) >> pblk->ppaf.lun_offset;
- ppa.g.ch = (paddr & pblk->ppaf.ch_mask) >> pblk->ppaf.ch_offset;
- ppa.g.pl = (paddr & pblk->ppaf.pln_mask) >> pblk->ppaf.pln_offset;
- ppa.g.sec = (paddr & pblk->ppaf.sec_mask) >> pblk->ppaf.sec_offset;
-
- return ppa;
-}
-
-static inline struct ppa_addr addr_to_pblk_ppa(struct pblk *pblk, u64 paddr,
- u64 line_id)
-{
- struct ppa_addr ppa;
-
- ppa = addr_to_gen_ppa(pblk, paddr, line_id);
-
- return ppa;
-}
-
static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
struct line_header *header)
{
@@ -1212,10 +1190,10 @@ static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
if (!ppa->c.is_cached &&
ppa->g.ch < geo->nr_chnls &&
- ppa->g.lun < geo->luns_per_chnl &&
+ ppa->g.lun < geo->nr_luns &&
ppa->g.pl < geo->nr_planes &&
- ppa->g.blk < geo->blks_per_lun &&
- ppa->g.pg < geo->pgs_per_blk &&
+ ppa->g.blk < geo->nr_chks &&
+ ppa->g.pg < geo->ws_per_chk &&
ppa->g.sec < geo->sec_per_pg)
continue;
@@ -1245,7 +1223,7 @@ static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
for (i = 0; i < rqd->nr_ppas; i++) {
ppa = ppa_list[i];
- line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
+ line = &pblk->lines[pblk_ppa_to_line(ppa)];
spin_lock(&line->lock);
if (line->state != PBLK_LINESTATE_OPEN) {
@@ -1288,11 +1266,6 @@ static inline unsigned int pblk_get_secs(struct bio *bio)
return bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
}
-static inline sector_t pblk_get_sector(sector_t lba)
-{
- return lba * NR_PHY_IN_LOG;
-}
-
static inline void pblk_setup_uuid(struct pblk *pblk)
{
uuid_le uuid;
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
deleted file mode 100644
index 0993c14..0000000
--- a/drivers/lightnvm/rrpc.c
+++ /dev/null
@@ -1,1625 +0,0 @@
-/*
- * Copyright (C) 2015 IT University of Copenhagen
- * Initial release: Matias Bjorling <m@bjorling.me>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
- */
-
-#include "rrpc.h"
-
-static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
-static DECLARE_RWSEM(rrpc_lock);
-
-static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags);
-
-#define rrpc_for_each_lun(rrpc, rlun, i) \
- for ((i) = 0, rlun = &(rrpc)->luns[0]; \
- (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
-
-static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_block *rblk = a->rblk;
- unsigned int pg_offset;
-
- lockdep_assert_held(&rrpc->rev_lock);
-
- if (a->addr == ADDR_EMPTY || !rblk)
- return;
-
- spin_lock(&rblk->lock);
-
- div_u64_rem(a->addr, dev->geo.sec_per_blk, &pg_offset);
- WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
- rblk->nr_invalid_pages++;
-
- spin_unlock(&rblk->lock);
-
- rrpc->rev_trans_map[a->addr].addr = ADDR_EMPTY;
-}
-
-static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
- unsigned int len)
-{
- sector_t i;
-
- spin_lock(&rrpc->rev_lock);
- for (i = slba; i < slba + len; i++) {
- struct rrpc_addr *gp = &rrpc->trans_map[i];
-
- rrpc_page_invalidate(rrpc, gp);
- gp->rblk = NULL;
- }
- spin_unlock(&rrpc->rev_lock);
-}
-
-static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
- sector_t laddr, unsigned int pages)
-{
- struct nvm_rq *rqd;
- struct rrpc_inflight_rq *inf;
-
- rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
- if (!rqd)
- return ERR_PTR(-ENOMEM);
-
- inf = rrpc_get_inflight_rq(rqd);
- if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
- mempool_free(rqd, rrpc->rq_pool);
- return NULL;
- }
-
- return rqd;
-}
-
-static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
-{
- struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);
-
- rrpc_unlock_laddr(rrpc, inf);
-
- mempool_free(rqd, rrpc->rq_pool);
-}
-
-static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
-{
- sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
- sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
- struct nvm_rq *rqd;
-
- while (1) {
- rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
- if (rqd)
- break;
-
- schedule();
- }
-
- if (IS_ERR(rqd)) {
- pr_err("rrpc: unable to acquire inflight IO\n");
- bio_io_error(bio);
- return;
- }
-
- rrpc_invalidate_range(rrpc, slba, len);
- rrpc_inflight_laddr_release(rrpc, rqd);
-}
-
-static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
-
- return (rblk->next_page == dev->geo.sec_per_blk);
-}
-
-/* Calculate relative addr for the given block, considering instantiated LUNs */
-static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_lun *rlun = rblk->rlun;
-
- return rlun->id * dev->geo.sec_per_blk;
-}
-
-static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev,
- struct rrpc_addr *gp)
-{
- struct rrpc_block *rblk = gp->rblk;
- struct rrpc_lun *rlun = rblk->rlun;
- u64 addr = gp->addr;
- struct ppa_addr paddr;
-
- paddr.ppa = addr;
- paddr = rrpc_linear_to_generic_addr(&dev->geo, paddr);
- paddr.g.ch = rlun->bppa.g.ch;
- paddr.g.lun = rlun->bppa.g.lun;
- paddr.g.blk = rblk->id;
-
- return paddr;
-}
-
-/* requires lun->lock taken */
-static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
- struct rrpc_block **cur_rblk)
-{
- struct rrpc *rrpc = rlun->rrpc;
-
- if (*cur_rblk) {
- spin_lock(&(*cur_rblk)->lock);
- WARN_ON(!block_is_full(rrpc, *cur_rblk));
- spin_unlock(&(*cur_rblk)->lock);
- }
- *cur_rblk = new_rblk;
-}
-
-static struct rrpc_block *__rrpc_get_blk(struct rrpc *rrpc,
- struct rrpc_lun *rlun)
-{
- struct rrpc_block *rblk = NULL;
-
- if (list_empty(&rlun->free_list))
- goto out;
-
- rblk = list_first_entry(&rlun->free_list, struct rrpc_block, list);
-
- list_move_tail(&rblk->list, &rlun->used_list);
- rblk->state = NVM_BLK_ST_TGT;
- rlun->nr_free_blocks--;
-
-out:
- return rblk;
-}
-
-static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
- unsigned long flags)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_block *rblk;
- int is_gc = flags & NVM_IOTYPE_GC;
-
- spin_lock(&rlun->lock);
- if (!is_gc && rlun->nr_free_blocks < rlun->reserved_blocks) {
- pr_err("nvm: rrpc: cannot give block to non GC request\n");
- spin_unlock(&rlun->lock);
- return NULL;
- }
-
- rblk = __rrpc_get_blk(rrpc, rlun);
- if (!rblk) {
- pr_err("nvm: rrpc: cannot get new block\n");
- spin_unlock(&rlun->lock);
- return NULL;
- }
- spin_unlock(&rlun->lock);
-
- bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
- rblk->next_page = 0;
- rblk->nr_invalid_pages = 0;
- atomic_set(&rblk->data_cmnt_size, 0);
-
- return rblk;
-}
-
-static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct rrpc_lun *rlun = rblk->rlun;
-
- spin_lock(&rlun->lock);
- if (rblk->state & NVM_BLK_ST_TGT) {
- list_move_tail(&rblk->list, &rlun->free_list);
- rlun->nr_free_blocks++;
- rblk->state = NVM_BLK_ST_FREE;
- } else if (rblk->state & NVM_BLK_ST_BAD) {
- list_move_tail(&rblk->list, &rlun->bb_list);
- rblk->state = NVM_BLK_ST_BAD;
- } else {
- WARN_ON_ONCE(1);
- pr_err("rrpc: erroneous type (ch:%d,lun:%d,blk%d-> %u)\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id, rblk->state);
- list_move_tail(&rblk->list, &rlun->bb_list);
- }
- spin_unlock(&rlun->lock);
-}
-
-static void rrpc_put_blks(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- if (rlun->cur)
- rrpc_put_blk(rrpc, rlun->cur);
- if (rlun->gc_cur)
- rrpc_put_blk(rrpc, rlun->gc_cur);
- }
-}
-
-static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
-{
- int next = atomic_inc_return(&rrpc->next_lun);
-
- return &rrpc->luns[next % rrpc->nr_luns];
-}
-
-static void rrpc_gc_kick(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- unsigned int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- queue_work(rrpc->krqd_wq, &rlun->ws_gc);
- }
-}
-
-/*
- * timed GC every interval.
- */
-static void rrpc_gc_timer(struct timer_list *t)
-{
- struct rrpc *rrpc = from_timer(rrpc, t, gc_timer);
-
- rrpc_gc_kick(rrpc);
- mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
-}
-
-static void rrpc_end_sync_bio(struct bio *bio)
-{
- struct completion *waiting = bio->bi_private;
-
- if (bio->bi_status)
- pr_err("nvm: gc request failed (%u).\n", bio->bi_status);
-
- complete(waiting);
-}
-
-/*
- * rrpc_move_valid_pages -- migrate live data off the block
- * @rrpc: the 'rrpc' structure
- * @block: the block from which to migrate live pages
- *
- * Description:
- * GC algorithms may call this function to migrate remaining live
- * pages off the block prior to erasing it. This function blocks
- * further execution until the operation is complete.
- */
-static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct request_queue *q = dev->q;
- struct rrpc_rev_addr *rev;
- struct nvm_rq *rqd;
- struct bio *bio;
- struct page *page;
- int slot;
- int nr_sec_per_blk = dev->geo.sec_per_blk;
- u64 phys_addr;
- DECLARE_COMPLETION_ONSTACK(wait);
-
- if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
- return 0;
-
- bio = bio_alloc(GFP_NOIO, 1);
- if (!bio) {
- pr_err("nvm: could not alloc bio to gc\n");
- return -ENOMEM;
- }
-
- page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
-
- while ((slot = find_first_zero_bit(rblk->invalid_pages,
- nr_sec_per_blk)) < nr_sec_per_blk) {
-
- /* Lock laddr */
- phys_addr = rrpc_blk_to_ppa(rrpc, rblk) + slot;
-
-try:
- spin_lock(&rrpc->rev_lock);
- /* Get logical address from physical to logical table */
- rev = &rrpc->rev_trans_map[phys_addr];
- /* already updated by previous regular write */
- if (rev->addr == ADDR_EMPTY) {
- spin_unlock(&rrpc->rev_lock);
- continue;
- }
-
- rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
- if (IS_ERR_OR_NULL(rqd)) {
- spin_unlock(&rrpc->rev_lock);
- schedule();
- goto try;
- }
-
- spin_unlock(&rrpc->rev_lock);
-
- /* Perform read to do GC */
- bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
- bio->bi_private = &wait;
- bio->bi_end_io = rrpc_end_sync_bio;
-
- /* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
- bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
-
- if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
- pr_err("rrpc: gc read failed.\n");
- rrpc_inflight_laddr_release(rrpc, rqd);
- goto finished;
- }
- wait_for_completion_io(&wait);
- if (bio->bi_status) {
- rrpc_inflight_laddr_release(rrpc, rqd);
- goto finished;
- }
-
- bio_reset(bio);
- reinit_completion(&wait);
-
- bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- bio->bi_private = &wait;
- bio->bi_end_io = rrpc_end_sync_bio;
-
- bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
-
- /* turn the command around and write the data back to a new
- * address
- */
- if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
- pr_err("rrpc: gc write failed.\n");
- rrpc_inflight_laddr_release(rrpc, rqd);
- goto finished;
- }
- wait_for_completion_io(&wait);
-
- rrpc_inflight_laddr_release(rrpc, rqd);
- if (bio->bi_status)
- goto finished;
-
- bio_reset(bio);
- }
-
-finished:
- mempool_free(page, rrpc->page_pool);
- bio_put(bio);
-
- if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
- pr_err("nvm: failed to garbage collect block\n");
- return -EIO;
- }
-
- return 0;
-}
-
-static void rrpc_block_gc(struct work_struct *work)
-{
- struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
- ws_gc);
- struct rrpc *rrpc = gcb->rrpc;
- struct rrpc_block *rblk = gcb->rblk;
- struct rrpc_lun *rlun = rblk->rlun;
- struct ppa_addr ppa;
-
- mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id);
-
- if (rrpc_move_valid_pages(rrpc, rblk))
- goto put_back;
-
- ppa.ppa = 0;
- ppa.g.ch = rlun->bppa.g.ch;
- ppa.g.lun = rlun->bppa.g.lun;
- ppa.g.blk = rblk->id;
-
- if (nvm_erase_sync(rrpc->dev, &ppa, 1))
- goto put_back;
-
- rrpc_put_blk(rrpc, rblk);
-
- return;
-
-put_back:
- spin_lock(&rlun->lock);
- list_add_tail(&rblk->prio, &rlun->prio_list);
- spin_unlock(&rlun->lock);
-}
-
-/* the block with highest number of invalid pages, will be in the beginning
- * of the list
- */
-static struct rrpc_block *rblk_max_invalid(struct rrpc_block *ra,
- struct rrpc_block *rb)
-{
- if (ra->nr_invalid_pages == rb->nr_invalid_pages)
- return ra;
-
- return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
-}
-
-/* linearly find the block with highest number of invalid pages
- * requires lun->lock
- */
-static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
-{
- struct list_head *prio_list = &rlun->prio_list;
- struct rrpc_block *rblk, *max;
-
- BUG_ON(list_empty(prio_list));
-
- max = list_first_entry(prio_list, struct rrpc_block, prio);
- list_for_each_entry(rblk, prio_list, prio)
- max = rblk_max_invalid(max, rblk);
-
- return max;
-}
-
-static void rrpc_lun_gc(struct work_struct *work)
-{
- struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
- struct rrpc *rrpc = rlun->rrpc;
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_block_gc *gcb;
- unsigned int nr_blocks_need;
-
- nr_blocks_need = dev->geo.blks_per_lun / GC_LIMIT_INVERSE;
-
- if (nr_blocks_need < rrpc->nr_luns)
- nr_blocks_need = rrpc->nr_luns;
-
- spin_lock(&rlun->lock);
- while (nr_blocks_need > rlun->nr_free_blocks &&
- !list_empty(&rlun->prio_list)) {
- struct rrpc_block *rblk = block_prio_find_max(rlun);
-
- if (!rblk->nr_invalid_pages)
- break;
-
- gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
- if (!gcb)
- break;
-
- list_del_init(&rblk->prio);
-
- WARN_ON(!block_is_full(rrpc, rblk));
-
- pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id);
-
- gcb->rrpc = rrpc;
- gcb->rblk = rblk;
- INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
-
- queue_work(rrpc->kgc_wq, &gcb->ws_gc);
-
- nr_blocks_need--;
- }
- spin_unlock(&rlun->lock);
-
- /* TODO: Hint that request queue can be started again */
-}
-
-static void rrpc_gc_queue(struct work_struct *work)
-{
- struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
- ws_gc);
- struct rrpc *rrpc = gcb->rrpc;
- struct rrpc_block *rblk = gcb->rblk;
- struct rrpc_lun *rlun = rblk->rlun;
-
- spin_lock(&rlun->lock);
- list_add_tail(&rblk->prio, &rlun->prio_list);
- spin_unlock(&rlun->lock);
-
- mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id);
-}
-
-static const struct block_device_operations rrpc_fops = {
- .owner = THIS_MODULE,
-};
-
-static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
-{
- unsigned int i;
- struct rrpc_lun *rlun, *max_free;
-
- if (!is_gc)
- return get_next_lun(rrpc);
-
- /* during GC, we don't care about RR, instead we want to make
- * sure that we maintain evenness between the block luns.
- */
- max_free = &rrpc->luns[0];
- /* prevent GC-ing lun from devouring pages of a lun with
- * little free blocks. We don't take the lock as we only need an
- * estimate.
- */
- rrpc_for_each_lun(rrpc, rlun, i) {
- if (rlun->nr_free_blocks > max_free->nr_free_blocks)
- max_free = rlun;
- }
-
- return max_free;
-}
-
-static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
- struct rrpc_block *rblk, u64 paddr)
-{
- struct rrpc_addr *gp;
- struct rrpc_rev_addr *rev;
-
- BUG_ON(laddr >= rrpc->nr_sects);
-
- gp = &rrpc->trans_map[laddr];
- spin_lock(&rrpc->rev_lock);
- if (gp->rblk)
- rrpc_page_invalidate(rrpc, gp);
-
- gp->addr = paddr;
- gp->rblk = rblk;
-
- rev = &rrpc->rev_trans_map[gp->addr];
- rev->addr = laddr;
- spin_unlock(&rrpc->rev_lock);
-
- return gp;
-}
-
-static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- u64 addr = ADDR_EMPTY;
-
- spin_lock(&rblk->lock);
- if (block_is_full(rrpc, rblk))
- goto out;
-
- addr = rblk->next_page;
-
- rblk->next_page++;
-out:
- spin_unlock(&rblk->lock);
- return addr;
-}
-
-/* Map a logical address to a physical page. The mapping implements a
- * round-robin approach and allocates a page from the next available lun.
- *
- * Returns a ppa_addr holding the physical address. Returns an empty
- * address (ADDR_EMPTY) if no blocks in the next rlun are available.
- */
-static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
- int is_gc)
-{
- struct nvm_tgt_dev *tgt_dev = rrpc->dev;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk, **cur_rblk;
- struct rrpc_addr *p;
- struct ppa_addr ppa;
- u64 paddr;
- int gc_force = 0;
-
- ppa.ppa = ADDR_EMPTY;
- rlun = rrpc_get_lun_rr(rrpc, is_gc);
-
- if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
- return ppa;
-
-	/*
-	 * page allocation steps:
-	 * 1. Try to allocate a new page from the current rblk
-	 * 2a. If that succeeds, map it in and return
-	 * 2b. If it fails, first try to allocate a new block from the media
-	 *     manager, then retry step 1. Retry until the normal block pool
-	 *     is exhausted.
-	 * 3. If exhausted, and the garbage collector is requesting the block,
-	 *    go to the reserved block and retry step 1.
-	 *    If this fails as well, or the request does not come from GC,
-	 *    report that no block could be retrieved and let the caller
-	 *    handle further processing.
-	 */
-
- spin_lock(&rlun->lock);
- cur_rblk = &rlun->cur;
- rblk = rlun->cur;
-retry:
- paddr = rrpc_alloc_addr(rrpc, rblk);
-
- if (paddr != ADDR_EMPTY)
- goto done;
-
- if (!list_empty(&rlun->wblk_list)) {
-new_blk:
- rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
- prio);
- rrpc_set_lun_cur(rlun, rblk, cur_rblk);
- list_del(&rblk->prio);
- goto retry;
- }
- spin_unlock(&rlun->lock);
-
- rblk = rrpc_get_blk(rrpc, rlun, gc_force);
- if (rblk) {
- spin_lock(&rlun->lock);
- list_add_tail(&rblk->prio, &rlun->wblk_list);
-		/*
-		 * another thread might already have added a new block;
-		 * therefore, make sure that one is used instead of the
-		 * one just added.
-		 */
- goto new_blk;
- }
-
- if (unlikely(is_gc) && !gc_force) {
- /* retry from emergency gc block */
- cur_rblk = &rlun->gc_cur;
- rblk = rlun->gc_cur;
- gc_force = 1;
- spin_lock(&rlun->lock);
- goto retry;
- }
-
- pr_err("rrpc: failed to allocate new block\n");
- return ppa;
-done:
- spin_unlock(&rlun->lock);
- p = rrpc_update_map(rrpc, laddr, rblk, paddr);
- if (!p)
- return ppa;
-
- /* return global address */
- return rrpc_ppa_to_gaddr(tgt_dev, p);
-}
-
-static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct rrpc_block_gc *gcb;
-
- gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
- if (!gcb) {
- pr_err("rrpc: unable to queue block for gc.");
- return;
- }
-
- gcb->rrpc = rrpc;
- gcb->rblk = rblk;
-
- INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
- queue_work(rrpc->kgc_wq, &gcb->ws_gc);
-}
-
-static struct rrpc_lun *rrpc_ppa_to_lun(struct rrpc *rrpc, struct ppa_addr p)
-{
- struct rrpc_lun *rlun = NULL;
- int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- if (rrpc->luns[i].bppa.g.ch == p.g.ch &&
- rrpc->luns[i].bppa.g.lun == p.g.lun) {
- rlun = &rrpc->luns[i];
- break;
- }
- }
-
- return rlun;
-}
-
-static void __rrpc_mark_bad_block(struct rrpc *rrpc, struct ppa_addr ppa)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
-
- rlun = rrpc_ppa_to_lun(rrpc, ppa);
- rblk = &rlun->blocks[ppa.g.blk];
- rblk->state = NVM_BLK_ST_BAD;
-
- nvm_set_tgt_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
-}
-
-static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
-{
- void *comp_bits = &rqd->ppa_status;
- struct ppa_addr ppa, prev_ppa;
- int nr_ppas = rqd->nr_ppas;
- int bit;
-
- if (rqd->nr_ppas == 1)
- __rrpc_mark_bad_block(rrpc, rqd->ppa_addr);
-
- ppa_set_empty(&prev_ppa);
- bit = -1;
- while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
- ppa = rqd->ppa_list[bit];
- if (ppa_cmp_blk(ppa, prev_ppa))
- continue;
-
- __rrpc_mark_bad_block(rrpc, ppa);
- }
-}
-
-static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
- sector_t laddr, uint8_t npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_addr *p;
- struct rrpc_block *rblk;
- int cmnt_size, i;
-
- for (i = 0; i < npages; i++) {
- p = &rrpc->trans_map[laddr + i];
- rblk = p->rblk;
-
- cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
- if (unlikely(cmnt_size == dev->geo.sec_per_blk))
- rrpc_run_gc(rrpc, rblk);
- }
-}
-
-static void rrpc_end_io(struct nvm_rq *rqd)
-{
- struct rrpc *rrpc = rqd->private;
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
- uint8_t npages = rqd->nr_ppas;
- sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
-
- if (bio_data_dir(rqd->bio) == WRITE) {
- if (rqd->error == NVM_RSP_ERR_FAILWRITE)
- rrpc_mark_bad_block(rrpc, rqd);
-
- rrpc_end_io_write(rrpc, rrqd, laddr, npages);
- }
-
- bio_put(rqd->bio);
-
- if (rrqd->flags & NVM_IOTYPE_GC)
- return;
-
- rrpc_unlock_rq(rrpc, rqd);
-
- if (npages > 1)
- nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
-
- mempool_free(rqd, rrpc->rq_pool);
-}
-
-static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags, int npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- struct rrpc_addr *gp;
- sector_t laddr = rrpc_get_laddr(bio);
- int is_gc = flags & NVM_IOTYPE_GC;
- int i;
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
- return NVM_IO_REQUEUE;
- }
-
- for (i = 0; i < npages; i++) {
- /* We assume that mapping occurs at 4KB granularity */
- BUG_ON(!(laddr + i < rrpc->nr_sects));
- gp = &rrpc->trans_map[laddr + i];
-
- if (gp->rblk) {
- rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp);
- } else {
- BUG_ON(is_gc);
- rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(dev->parent, rqd->ppa_list,
- rqd->dma_ppa_list);
- return NVM_IO_DONE;
- }
- }
-
- rqd->opcode = NVM_OP_HBREAD;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
- unsigned long flags)
-{
- int is_gc = flags & NVM_IOTYPE_GC;
- sector_t laddr = rrpc_get_laddr(bio);
- struct rrpc_addr *gp;
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
- return NVM_IO_REQUEUE;
-
- BUG_ON(!(laddr < rrpc->nr_sects));
- gp = &rrpc->trans_map[laddr];
-
- if (gp->rblk) {
- rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp);
- } else {
- BUG_ON(is_gc);
- rrpc_unlock_rq(rrpc, rqd);
- return NVM_IO_DONE;
- }
-
- rqd->opcode = NVM_OP_HBREAD;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags, int npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- struct ppa_addr p;
- sector_t laddr = rrpc_get_laddr(bio);
- int is_gc = flags & NVM_IOTYPE_GC;
- int i;
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
- return NVM_IO_REQUEUE;
- }
-
- for (i = 0; i < npages; i++) {
- /* We assume that mapping occurs at 4KB granularity */
- p = rrpc_map_page(rrpc, laddr + i, is_gc);
- if (p.ppa == ADDR_EMPTY) {
- BUG_ON(is_gc);
- rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(dev->parent, rqd->ppa_list,
- rqd->dma_ppa_list);
- rrpc_gc_kick(rrpc);
- return NVM_IO_REQUEUE;
- }
-
- rqd->ppa_list[i] = p;
- }
-
- rqd->opcode = NVM_OP_HBWRITE;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags)
-{
- struct ppa_addr p;
- int is_gc = flags & NVM_IOTYPE_GC;
- sector_t laddr = rrpc_get_laddr(bio);
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
- return NVM_IO_REQUEUE;
-
- p = rrpc_map_page(rrpc, laddr, is_gc);
- if (p.ppa == ADDR_EMPTY) {
- BUG_ON(is_gc);
- rrpc_unlock_rq(rrpc, rqd);
- rrpc_gc_kick(rrpc);
- return NVM_IO_REQUEUE;
- }
-
- rqd->ppa_addr = p;
- rqd->opcode = NVM_OP_HBWRITE;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
-
- if (npages > 1) {
- rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
- &rqd->dma_ppa_list);
- if (!rqd->ppa_list) {
- pr_err("rrpc: not able to allocate ppa list\n");
- return NVM_IO_ERR;
- }
-
- if (bio_op(bio) == REQ_OP_WRITE)
- return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
- npages);
-
- return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
- }
-
- if (bio_op(bio) == REQ_OP_WRITE)
- return rrpc_write_rq(rrpc, bio, rqd, flags);
-
- return rrpc_read_rq(rrpc, bio, rqd, flags);
-}
-
-static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
- uint8_t nr_pages = rrpc_get_pages(bio);
- int bio_size = bio_sectors(bio) << 9;
- int err;
-
- if (bio_size < dev->geo.sec_size)
- return NVM_IO_ERR;
- else if (bio_size > dev->geo.max_rq_size)
- return NVM_IO_ERR;
-
- err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
- if (err)
- return err;
-
- bio_get(bio);
- rqd->bio = bio;
- rqd->private = rrpc;
- rqd->nr_ppas = nr_pages;
- rqd->end_io = rrpc_end_io;
- rrq->flags = flags;
-
- err = nvm_submit_io(dev, rqd);
- if (err) {
- pr_err("rrpc: I/O submission failed: %d\n", err);
- bio_put(bio);
- if (!(flags & NVM_IOTYPE_GC)) {
- rrpc_unlock_rq(rrpc, rqd);
- if (rqd->nr_ppas > 1)
- nvm_dev_dma_free(dev->parent, rqd->ppa_list,
- rqd->dma_ppa_list);
- }
- return NVM_IO_ERR;
- }
-
- return NVM_IO_OK;
-}
-
-static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
-{
- struct rrpc *rrpc = q->queuedata;
- struct nvm_rq *rqd;
- int err;
-
- blk_queue_split(q, &bio);
-
- if (bio_op(bio) == REQ_OP_DISCARD) {
- rrpc_discard(rrpc, bio);
- return BLK_QC_T_NONE;
- }
-
- rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
- memset(rqd, 0, sizeof(struct nvm_rq));
-
- err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
- switch (err) {
- case NVM_IO_OK:
- return BLK_QC_T_NONE;
- case NVM_IO_ERR:
- bio_io_error(bio);
- break;
- case NVM_IO_DONE:
- bio_endio(bio);
- break;
- case NVM_IO_REQUEUE:
- spin_lock(&rrpc->bio_lock);
- bio_list_add(&rrpc->requeue_bios, bio);
- spin_unlock(&rrpc->bio_lock);
- queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
- break;
- }
-
- mempool_free(rqd, rrpc->rq_pool);
- return BLK_QC_T_NONE;
-}
-
-static void rrpc_requeue(struct work_struct *work)
-{
- struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
- struct bio_list bios;
- struct bio *bio;
-
- bio_list_init(&bios);
-
- spin_lock(&rrpc->bio_lock);
- bio_list_merge(&bios, &rrpc->requeue_bios);
- bio_list_init(&rrpc->requeue_bios);
- spin_unlock(&rrpc->bio_lock);
-
- while ((bio = bio_list_pop(&bios)))
- rrpc_make_rq(rrpc->disk->queue, bio);
-}
-
-static void rrpc_gc_free(struct rrpc *rrpc)
-{
- if (rrpc->krqd_wq)
- destroy_workqueue(rrpc->krqd_wq);
-
- if (rrpc->kgc_wq)
- destroy_workqueue(rrpc->kgc_wq);
-}
-
-static int rrpc_gc_init(struct rrpc *rrpc)
-{
- rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
- rrpc->nr_luns);
- if (!rrpc->krqd_wq)
- return -ENOMEM;
-
- rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
- if (!rrpc->kgc_wq)
- return -ENOMEM;
-
- timer_setup(&rrpc->gc_timer, rrpc_gc_timer, 0);
-
- return 0;
-}
-
-static void rrpc_map_free(struct rrpc *rrpc)
-{
- vfree(rrpc->rev_trans_map);
- vfree(rrpc->trans_map);
-}
-
-static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
-{
- struct rrpc *rrpc = (struct rrpc *)private;
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_addr *addr = rrpc->trans_map + slba;
- struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
- u64 i;
-
- for (i = 0; i < nlb; i++) {
- struct ppa_addr gaddr;
- u64 pba = le64_to_cpu(entries[i]);
- unsigned int mod;
-
-		/* LNVM treats address spaces as silos; LBA and PBA are
-		 * equally large and zero-indexed.
-		 */
- if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
- pr_err("nvm: L2P data entry is out of bounds!\n");
- pr_err("nvm: Maybe loaded an old target L2P\n");
- return -EINVAL;
- }
-
-		/* Address zero is special: the first page on a disk is
-		 * protected, as it often holds internal device boot
-		 * information.
-		 */
- if (!pba)
- continue;
-
- div_u64_rem(pba, rrpc->nr_sects, &mod);
-
- gaddr = rrpc_recov_addr(dev, pba);
- rlun = rrpc_ppa_to_lun(rrpc, gaddr);
- if (!rlun) {
- pr_err("rrpc: l2p corruption on lba %llu\n",
- slba + i);
- return -EINVAL;
- }
-
- rblk = &rlun->blocks[gaddr.g.blk];
- if (!rblk->state) {
-			/* at this point, we don't know anything about the
-			 * block. It's up to the FTL on top to re-establish the
-			 * block state. The block is assumed to be open.
-			 */
- list_move_tail(&rblk->list, &rlun->used_list);
- rblk->state = NVM_BLK_ST_TGT;
- rlun->nr_free_blocks--;
- }
-
- addr[i].addr = pba;
- addr[i].rblk = rblk;
- raddr[mod].addr = slba + i;
- }
-
- return 0;
-}
-
-static int rrpc_map_init(struct rrpc *rrpc)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t i;
- int ret;
-
- rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
- if (!rrpc->trans_map)
- return -ENOMEM;
-
- rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
- * rrpc->nr_sects);
- if (!rrpc->rev_trans_map)
- return -ENOMEM;
-
- for (i = 0; i < rrpc->nr_sects; i++) {
- struct rrpc_addr *p = &rrpc->trans_map[i];
- struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
-
- p->addr = ADDR_EMPTY;
- r->addr = ADDR_EMPTY;
- }
-
- /* Bring up the mapping table from device */
- ret = nvm_get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
- rrpc_l2p_update, rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not read L2P table.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* Minimum pages needed within a lun */
-#define PAGE_POOL_SIZE 16
-#define ADDR_POOL_SIZE 64
-
-static int rrpc_core_init(struct rrpc *rrpc)
-{
- down_write(&rrpc_lock);
- if (!rrpc_gcb_cache) {
- rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
- sizeof(struct rrpc_block_gc), 0, 0, NULL);
- if (!rrpc_gcb_cache) {
- up_write(&rrpc_lock);
- return -ENOMEM;
- }
-
- rrpc_rq_cache = kmem_cache_create("rrpc_rq",
- sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
- 0, 0, NULL);
- if (!rrpc_rq_cache) {
- kmem_cache_destroy(rrpc_gcb_cache);
- up_write(&rrpc_lock);
- return -ENOMEM;
- }
- }
- up_write(&rrpc_lock);
-
- rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
- if (!rrpc->page_pool)
- return -ENOMEM;
-
- rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->geo.nr_luns,
- rrpc_gcb_cache);
- if (!rrpc->gcb_pool)
- return -ENOMEM;
-
- rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
- if (!rrpc->rq_pool)
- return -ENOMEM;
-
- spin_lock_init(&rrpc->inflights.lock);
- INIT_LIST_HEAD(&rrpc->inflights.reqs);
-
- return 0;
-}
-
-static void rrpc_core_free(struct rrpc *rrpc)
-{
- mempool_destroy(rrpc->page_pool);
- mempool_destroy(rrpc->gcb_pool);
- mempool_destroy(rrpc->rq_pool);
-}
-
-static void rrpc_luns_free(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- int i;
-
- if (!rrpc->luns)
- return;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- vfree(rlun->blocks);
- }
-
- kfree(rrpc->luns);
-}
-
-static int rrpc_bb_discovery(struct nvm_tgt_dev *dev, struct rrpc_lun *rlun)
-{
- struct nvm_geo *geo = &dev->geo;
- struct rrpc_block *rblk;
- struct ppa_addr ppa;
- u8 *blks;
- int nr_blks;
- int i;
- int ret;
-
- if (!dev->parent->ops->get_bb_tbl)
- return 0;
-
- nr_blks = geo->blks_per_lun * geo->plane_mode;
- blks = kmalloc(nr_blks, GFP_KERNEL);
- if (!blks)
- return -ENOMEM;
-
- ppa.ppa = 0;
- ppa.g.ch = rlun->bppa.g.ch;
- ppa.g.lun = rlun->bppa.g.lun;
-
- ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
- if (ret) {
- pr_err("rrpc: could not get BB table\n");
- goto out;
- }
-
- nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
- if (nr_blks < 0) {
- ret = nr_blks;
- goto out;
- }
-
- for (i = 0; i < nr_blks; i++) {
- if (blks[i] == NVM_BLK_T_FREE)
- continue;
-
- rblk = &rlun->blocks[i];
- list_move_tail(&rblk->list, &rlun->bb_list);
- rblk->state = NVM_BLK_ST_BAD;
- rlun->nr_free_blocks--;
- }
-
-out:
- kfree(blks);
- return ret;
-}
-
-static void rrpc_set_lun_ppa(struct rrpc_lun *rlun, struct ppa_addr ppa)
-{
- rlun->bppa.ppa = 0;
- rlun->bppa.g.ch = ppa.g.ch;
- rlun->bppa.g.lun = ppa.g.lun;
-}
-
-static int rrpc_luns_init(struct rrpc *rrpc, struct ppa_addr *luns)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct nvm_geo *geo = &dev->geo;
- struct rrpc_lun *rlun;
- int i, j, ret = -EINVAL;
-
- if (geo->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
- pr_err("rrpc: number of pages per block too high.");
- return -EINVAL;
- }
-
- spin_lock_init(&rrpc->rev_lock);
-
- rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
- GFP_KERNEL);
- if (!rrpc->luns)
- return -ENOMEM;
-
- /* 1:1 mapping */
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- rlun->id = i;
- rrpc_set_lun_ppa(rlun, luns[i]);
- rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
- geo->blks_per_lun);
- if (!rlun->blocks) {
- ret = -ENOMEM;
- goto err;
- }
-
- INIT_LIST_HEAD(&rlun->free_list);
- INIT_LIST_HEAD(&rlun->used_list);
- INIT_LIST_HEAD(&rlun->bb_list);
-
- for (j = 0; j < geo->blks_per_lun; j++) {
- struct rrpc_block *rblk = &rlun->blocks[j];
-
- rblk->id = j;
- rblk->rlun = rlun;
- rblk->state = NVM_BLK_T_FREE;
- INIT_LIST_HEAD(&rblk->prio);
- INIT_LIST_HEAD(&rblk->list);
- spin_lock_init(&rblk->lock);
-
- list_add_tail(&rblk->list, &rlun->free_list);
- }
-
- rlun->rrpc = rrpc;
- rlun->nr_free_blocks = geo->blks_per_lun;
- rlun->reserved_blocks = 2; /* for GC only */
-
- INIT_LIST_HEAD(&rlun->prio_list);
- INIT_LIST_HEAD(&rlun->wblk_list);
-
- INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
- spin_lock_init(&rlun->lock);
-
- if (rrpc_bb_discovery(dev, rlun))
- goto err;
-
- }
-
- return 0;
-err:
- return ret;
-}
-
-/* returns 0 on success and stores the beginning address in *begin */
-static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t size = rrpc->nr_sects * dev->geo.sec_size;
- int ret;
-
- size >>= 9;
-
- ret = nvm_get_area(dev, begin, size);
- if (!ret)
- *begin >>= (ilog2(dev->geo.sec_size) - 9);
-
- return ret;
-}
-
-static void rrpc_area_free(struct rrpc *rrpc)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t begin = rrpc->soffset << (ilog2(dev->geo.sec_size) - 9);
-
- nvm_put_area(dev, begin);
-}
-
-static void rrpc_free(struct rrpc *rrpc)
-{
- rrpc_gc_free(rrpc);
- rrpc_map_free(rrpc);
- rrpc_core_free(rrpc);
- rrpc_luns_free(rrpc);
- rrpc_area_free(rrpc);
-
- kfree(rrpc);
-}
-
-static void rrpc_exit(void *private)
-{
- struct rrpc *rrpc = private;
-
- del_timer(&rrpc->gc_timer);
-
- flush_workqueue(rrpc->krqd_wq);
- flush_workqueue(rrpc->kgc_wq);
-
- rrpc_free(rrpc);
-}
-
-static sector_t rrpc_capacity(void *private)
-{
- struct rrpc *rrpc = private;
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t reserved, provisioned;
-
- /* cur, gc, and two emergency blocks for each lun */
- reserved = rrpc->nr_luns * dev->geo.sec_per_blk * 4;
- provisioned = rrpc->nr_sects - reserved;
-
- if (reserved > rrpc->nr_sects) {
- pr_err("rrpc: not enough space available to expose storage.\n");
- return 0;
- }
-
- sector_div(provisioned, 10);
- return provisioned * 9 * NR_PHY_IN_LOG;
-}
-
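/*
 * A worked example of the capacity arithmetic above, under a hypothetical
 * geometry (64 luns, 256 sectors per block, 2^24 mapped sectors). Four
 * blocks per lun are held back (cur, gc_cur and two emergency blocks) and
 * a tenth of the remainder is kept as spare area.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t nr_luns = 64, sec_per_blk = 256;
	uint64_t nr_sects = 1ULL << 24;
	uint64_t nr_phy_in_log = 4096 / 512;	/* NR_PHY_IN_LOG */

	uint64_t reserved = nr_luns * sec_per_blk * 4;
	uint64_t provisioned = nr_sects - reserved;

	/* expose 9/10 of what is left, converted to 512B sectors */
	provisioned = provisioned / 10 * 9 * nr_phy_in_log;
	printf("exposed capacity: %llu sectors\n",
	       (unsigned long long)provisioned);
	return 0;
}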
-/*
- * Looks up the logical address in the reverse translation map and checks
- * whether it is still valid by comparing the forward (logical-to-physical)
- * mapping against the physical address. Pages whose forward mapping no
- * longer points back at them are marked invalid.
- */
-static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- int offset;
- struct rrpc_addr *laddr;
- u64 bpaddr, paddr, pladdr;
-
- bpaddr = block_to_rel_addr(rrpc, rblk);
- for (offset = 0; offset < dev->geo.sec_per_blk; offset++) {
- paddr = bpaddr + offset;
-
- pladdr = rrpc->rev_trans_map[paddr].addr;
- if (pladdr == ADDR_EMPTY)
- continue;
-
- laddr = &rrpc->trans_map[pladdr];
-
- if (paddr == laddr->addr) {
- laddr->rblk = rblk;
- } else {
- set_bit(offset, rblk->invalid_pages);
- rblk->nr_invalid_pages++;
- }
- }
-}
-
-static int rrpc_blocks_init(struct rrpc *rrpc)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
- int lun_iter, blk_iter;
-
- for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
- rlun = &rrpc->luns[lun_iter];
-
- for (blk_iter = 0; blk_iter < dev->geo.blks_per_lun;
- blk_iter++) {
- rblk = &rlun->blocks[blk_iter];
- rrpc_block_map_update(rrpc, rblk);
- }
- }
-
- return 0;
-}
-
-static int rrpc_luns_configure(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
- int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
-
- rblk = rrpc_get_blk(rrpc, rlun, 0);
- if (!rblk)
- goto err;
- rrpc_set_lun_cur(rlun, rblk, &rlun->cur);
-
- /* Emergency gc block */
- rblk = rrpc_get_blk(rrpc, rlun, 1);
- if (!rblk)
- goto err;
- rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
- }
-
- return 0;
-err:
- rrpc_put_blks(rrpc);
- return -EINVAL;
-}
-
-static struct nvm_tgt_type tt_rrpc;
-
-static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
- int flags)
-{
- struct request_queue *bqueue = dev->q;
- struct request_queue *tqueue = tdisk->queue;
- struct nvm_geo *geo = &dev->geo;
- struct rrpc *rrpc;
- sector_t soffset;
- int ret;
-
- if (!(dev->identity.dom & NVM_RSP_L2P)) {
- pr_err("nvm: rrpc: device does not support l2p (%x)\n",
- dev->identity.dom);
- return ERR_PTR(-EINVAL);
- }
-
- rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
- if (!rrpc)
- return ERR_PTR(-ENOMEM);
-
- rrpc->dev = dev;
- rrpc->disk = tdisk;
-
- bio_list_init(&rrpc->requeue_bios);
- spin_lock_init(&rrpc->bio_lock);
- INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
-
- rrpc->nr_luns = geo->nr_luns;
- rrpc->nr_sects = (unsigned long long)geo->sec_per_lun * rrpc->nr_luns;
-
- /* simple round-robin strategy */
- atomic_set(&rrpc->next_lun, -1);
-
- ret = rrpc_area_init(rrpc, &soffset);
- if (ret < 0) {
- pr_err("nvm: rrpc: could not initialize area\n");
- return ERR_PTR(ret);
- }
- rrpc->soffset = soffset;
-
- ret = rrpc_luns_init(rrpc, dev->luns);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize luns\n");
- goto err;
- }
-
- ret = rrpc_core_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize core\n");
- goto err;
- }
-
- ret = rrpc_map_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize maps\n");
- goto err;
- }
-
- ret = rrpc_blocks_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize state for blocks\n");
- goto err;
- }
-
- ret = rrpc_luns_configure(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
- goto err;
- }
-
- ret = rrpc_gc_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize gc\n");
- goto err;
- }
-
- /* inherit the size from the underlying device */
- blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
- blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
-
- pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
- rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);
-
- mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
-
- return rrpc;
-err:
- rrpc_free(rrpc);
- return ERR_PTR(ret);
-}
-
-/* round robin, page-based FTL, and cost-based GC */
-static struct nvm_tgt_type tt_rrpc = {
- .name = "rrpc",
- .version = {1, 0, 0},
-
- .make_rq = rrpc_make_rq,
- .capacity = rrpc_capacity,
-
- .init = rrpc_init,
- .exit = rrpc_exit,
-};
-
-static int __init rrpc_module_init(void)
-{
- return nvm_register_tgt_type(&tt_rrpc);
-}
-
-static void rrpc_module_exit(void)
-{
- nvm_unregister_tgt_type(&tt_rrpc);
-}
-
-module_init(rrpc_module_init);
-module_exit(rrpc_module_exit);
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
deleted file mode 100644
index fdb6ff9..0000000
--- a/drivers/lightnvm/rrpc.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Copyright (C) 2015 IT University of Copenhagen
- * Initial release: Matias Bjorling <m@bjorling.me>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
- */
-
-#ifndef RRPC_H_
-#define RRPC_H_
-
-#include <linux/blkdev.h>
-#include <linux/blk-mq.h>
-#include <linux/bio.h>
-#include <linux/module.h>
-#include <linux/kthread.h>
-#include <linux/vmalloc.h>
-
-#include <linux/lightnvm.h>
-
-/* Only run GC if fewer than 1/X blocks are free */
-#define GC_LIMIT_INVERSE 10
-#define GC_TIME_SECS 100
-
-#define RRPC_SECTOR (512)
-#define RRPC_EXPOSED_PAGE_SIZE (4096)
-
-#define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)
-
-struct rrpc_inflight {
- struct list_head reqs;
- spinlock_t lock;
-};
-
-struct rrpc_inflight_rq {
- struct list_head list;
- sector_t l_start;
- sector_t l_end;
-};
-
-struct rrpc_rq {
- struct rrpc_inflight_rq inflight_rq;
- unsigned long flags;
-};
-
-struct rrpc_block {
- int id; /* id inside of LUN */
- struct rrpc_lun *rlun;
-
- struct list_head prio; /* LUN CG list */
- struct list_head list; /* LUN free, used, bb list */
-
-#define MAX_INVALID_PAGES_STORAGE 8
-	/* Bitmap for invalid page entries */
- unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
- /* points to the next writable page within a block */
- unsigned int next_page;
- /* number of pages that are invalid, wrt host page size */
- unsigned int nr_invalid_pages;
-
- int state;
-
- spinlock_t lock;
- atomic_t data_cmnt_size; /* data pages committed to stable storage */
-};
-
-struct rrpc_lun {
- struct rrpc *rrpc;
-
- int id;
- struct ppa_addr bppa;
-
- struct rrpc_block *cur, *gc_cur;
- struct rrpc_block *blocks; /* Reference to block allocation */
-
- struct list_head prio_list; /* Blocks that may be GC'ed */
- struct list_head wblk_list; /* Queued blocks to be written to */
-
- /* lun block lists */
- struct list_head used_list; /* In-use blocks */
-	struct list_head free_list;	/* Unused blocks, i.e. released
- * and ready for use
- */
- struct list_head bb_list; /* Bad blocks. Mutually exclusive with
- * free_list and used_list
- */
- unsigned int nr_free_blocks; /* Number of unused blocks */
-
- struct work_struct ws_gc;
-
- int reserved_blocks;
-
- spinlock_t lock;
-};
-
-struct rrpc {
- struct nvm_tgt_dev *dev;
- struct gendisk *disk;
-
- sector_t soffset; /* logical sector offset */
-
- int nr_luns;
- struct rrpc_lun *luns;
-
- /* calculated values */
- unsigned long long nr_sects;
-
-	/* Write strategy variables. Move these into a separate structure for
-	 * each strategy
- */
- atomic_t next_lun; /* Whenever a page is written, this is updated
- * to point to the next write lun
- */
-
- spinlock_t bio_lock;
- struct bio_list requeue_bios;
- struct work_struct ws_requeue;
-
- /* Simple translation map of logical addresses to physical addresses.
-	 * The logical addresses are known to the host system, while the physical
- * addresses are used when writing to the disk block device.
- */
- struct rrpc_addr *trans_map;
- /* also store a reverse map for garbage collection */
- struct rrpc_rev_addr *rev_trans_map;
- spinlock_t rev_lock;
-
- struct rrpc_inflight inflights;
-
- mempool_t *addr_pool;
- mempool_t *page_pool;
- mempool_t *gcb_pool;
- mempool_t *rq_pool;
-
- struct timer_list gc_timer;
- struct workqueue_struct *krqd_wq;
- struct workqueue_struct *kgc_wq;
-};
-
-struct rrpc_block_gc {
- struct rrpc *rrpc;
- struct rrpc_block *rblk;
- struct work_struct ws_gc;
-};
-
-/* Logical to physical mapping */
-struct rrpc_addr {
- u64 addr;
- struct rrpc_block *rblk;
-};
-
-/* Physical to logical mapping */
-struct rrpc_rev_addr {
- u64 addr;
-};
-
-static inline struct ppa_addr rrpc_linear_to_generic_addr(struct nvm_geo *geo,
- struct ppa_addr r)
-{
- struct ppa_addr l;
- int secs, pgs;
- sector_t ppa = r.ppa;
-
- l.ppa = 0;
-
- div_u64_rem(ppa, geo->sec_per_pg, &secs);
- l.g.sec = secs;
-
- sector_div(ppa, geo->sec_per_pg);
- div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
- l.g.pg = pgs;
-
- return l;
-}
-
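/*
 * A worked example of the decomposition above under a hypothetical
 * geometry of 4 sectors per page and 256 pages per block: linear
 * address 1029 = 1 + 4 * (1 + 256 * 1) splits into sector 1, page 1.
 */
#include <assert.h>
#include <stdint.h>

static void model_linear_to_generic(uint64_t ppa, unsigned sec_per_pg,
				    unsigned pgs_per_blk,
				    unsigned *sec, unsigned *pg)
{
	*sec = ppa % sec_per_pg;	/* offset inside the page */
	ppa /= sec_per_pg;
	*pg = ppa % pgs_per_blk;	/* page offset inside the block */
}

int main(void)
{
	unsigned sec, pg;

	model_linear_to_generic(1029, 4, 256, &sec, &pg);
	assert(sec == 1 && pg == 1);
	return 0;
}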
-static inline struct ppa_addr rrpc_recov_addr(struct nvm_tgt_dev *dev, u64 pba)
-{
- return linear_to_generic_addr(&dev->geo, pba);
-}
-
-static inline u64 rrpc_blk_to_ppa(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct nvm_geo *geo = &dev->geo;
- struct rrpc_lun *rlun = rblk->rlun;
-
- return (rlun->id * geo->sec_per_lun) + (rblk->id * geo->sec_per_blk);
-}
-
-static inline sector_t rrpc_get_laddr(struct bio *bio)
-{
- return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
-}
-
-static inline unsigned int rrpc_get_pages(struct bio *bio)
-{
- return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
-}
-
-static inline sector_t rrpc_get_sector(sector_t laddr)
-{
- return laddr * NR_PHY_IN_LOG;
-}
-
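/*
 * Sanity model for the helpers above: with 512B sectors and a 4096B
 * exposed page, NR_PHY_IN_LOG is 8, so logical addresses and 512B
 * sector numbers convert back and forth by a factor of eight.
 */
#include <assert.h>

int main(void)
{
	unsigned long long nr_phy_in_log = 4096 / 512;	/* == 8 */
	unsigned long long sector = 4096;		/* in 512B units */
	unsigned long long laddr = sector / nr_phy_in_log;

	assert(laddr == 512);
	assert(laddr * nr_phy_in_log == sector);	/* round-trip */
	return 0;
}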
-static inline int request_intersects(struct rrpc_inflight_rq *r,
- sector_t laddr_start, sector_t laddr_end)
-{
- return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
-}
-
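/*
 * The test above is the standard closed-interval intersection check:
 * two ranges overlap iff each starts no later than the other ends. A
 * self-contained model:
 */
#include <assert.h>

static int ranges_intersect(long a_start, long a_end, long b_start, long b_end)
{
	return (b_end >= a_start) && (b_start <= a_end);
}

int main(void)
{
	assert(ranges_intersect(0, 7, 7, 10));	/* touching endpoints overlap */
	assert(!ranges_intersect(0, 7, 8, 10));	/* disjoint */
	return 0;
}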
-static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
- unsigned int pages, struct rrpc_inflight_rq *r)
-{
- sector_t laddr_end = laddr + pages - 1;
- struct rrpc_inflight_rq *rtmp;
-
- WARN_ON(irqs_disabled());
-
- spin_lock_irq(&rrpc->inflights.lock);
- list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
- if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
- /* existing, overlapping request, come back later */
- spin_unlock_irq(&rrpc->inflights.lock);
- return 1;
- }
- }
-
- r->l_start = laddr;
- r->l_end = laddr_end;
-
- list_add_tail(&r->list, &rrpc->inflights.reqs);
- spin_unlock_irq(&rrpc->inflights.lock);
- return 0;
-}
-
-static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
- unsigned int pages,
- struct rrpc_inflight_rq *r)
-{
- BUG_ON((laddr + pages) > rrpc->nr_sects);
-
- return __rrpc_lock_laddr(rrpc, laddr, pages, r);
-}
-
-static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
-{
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
-
- return &rrqd->inflight_rq;
-}
-
-static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd)
-{
- sector_t laddr = rrpc_get_laddr(bio);
- unsigned int pages = rrpc_get_pages(bio);
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
-
- return rrpc_lock_laddr(rrpc, laddr, pages, r);
-}
-
-static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
- struct rrpc_inflight_rq *r)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rrpc->inflights.lock, flags);
- list_del_init(&r->list);
- spin_unlock_irqrestore(&rrpc->inflights.lock, flags);
-}
-
-static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
-{
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- uint8_t pages = rqd->nr_ppas;
-
- BUG_ON((r->l_start + pages) > rrpc->nr_sects);
-
- rrpc_unlock_laddr(rrpc, r);
-}
-
-#endif /* RRPC_H_ */
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index a0cc1bc..6cc6c0f 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -525,15 +525,21 @@ struct open_bucket {
/*
* We keep multiple buckets open for writes, and try to segregate different
- * write streams for better cache utilization: first we look for a bucket where
- * the last write to it was sequential with the current write, and failing that
- * we look for a bucket that was last used by the same task.
+ * write streams for better cache utilization: first we try to segregate flash
+ * only volume write streams from cached devices, secondly we look for a bucket
+ * where the last write to it was sequential with the current write, and
+ * failing that we look for a bucket that was last used by the same task.
*
 * The idea is that if you've got multiple tasks pulling data into the cache at the
* same time, you'll get better cache utilization if you try to segregate their
* data and preserve locality.
*
- * For example, say you've starting Firefox at the same time you're copying a
+ * For example, dirty sectors of a flash-only volume are not reclaimable;
+ * if they share a bucket with dirty sectors of a cached device, the bucket
+ * will stay marked dirty and won't be reclaimed, even though the cached
+ * device's dirty data has been written back to the backing device.
+ *
+ * And say you're starting Firefox at the same time you're copying a
* bunch of files. Firefox will likely end up being fairly hot and stay in the
* cache awhile, but the data you copied might not be; if you wrote all that
* data to the same buckets it'd get invalidated at the same time.
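/*
 * A compact model of the selection order the comment describes, using a
 * hypothetical bucket descriptor: flash-only and cached-device streams
 * never share a bucket, and within a stream a sequential-key match beats
 * a same-task match.
 */
struct open_bucket_model {
	int flash_only;		/* stream class of the last write */
	long last_key;		/* where the last write ended */
	unsigned long last_task;
};

static int bucket_suits(const struct open_bucket_model *b, int flash_only,
			long key, unsigned long task)
{
	if (b->flash_only != flash_only)
		return 0;			/* never mix the two classes */
	if (b->last_key == key)
		return 2;			/* sequential: best match */
	return b->last_task == task;		/* same task: fallback */
}

int main(void)
{
	struct open_bucket_model b = { 0, 100, 42 };

	return !(bucket_suits(&b, 0, 100, 7) == 2 &&	/* sequential wins */
		 bucket_suits(&b, 1, 100, 42) == 0);	/* wrong class loses */
}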
@@ -550,7 +556,10 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
struct open_bucket *ret, *ret_task = NULL;
list_for_each_entry_reverse(ret, &c->data_buckets, list)
- if (!bkey_cmp(&ret->key, search))
+ if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
+ UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
+ continue;
+ else if (!bkey_cmp(&ret->key, search))
goto found;
else if (ret->last_write_point == write_point)
ret_task = ret;
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 843877e..5e2d4e8 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -320,14 +320,15 @@ struct cached_dev {
*/
atomic_t has_dirty;
- struct bch_ratelimit writeback_rate;
- struct delayed_work writeback_rate_update;
-
/*
- * Internal to the writeback code, so read_dirty() can keep track of
- * where it's at.
+	 * Set to zero by things that touch the backing volume, except
+ * writeback. Incremented by writeback. Used to determine when to
+ * accelerate idle writeback.
*/
- sector_t last_read;
+ atomic_t backing_idle;
+
+ struct bch_ratelimit writeback_rate;
+ struct delayed_work writeback_rate_update;
/* Limit number of writeback bios in flight */
struct semaphore in_flight;
@@ -336,6 +337,14 @@ struct cached_dev {
struct keybuf writeback_keys;
+ /*
+ * Order the write-half of writeback operations strongly in dispatch
+ * order. (Maintain LBA order; don't allow reads completing out of
+ * order to re-order the writes...)
+ */
+ struct closure_waitlist writeback_ordering_wait;
+ atomic_t writeback_sequence_next;
+
/* For tracking sequential IO */
#define RECENT_IO_BITS 7
#define RECENT_IO (1 << RECENT_IO_BITS)
@@ -488,6 +497,7 @@ struct cache_set {
int caches_loaded;
struct bcache_device **devices;
+ unsigned devices_max_used;
struct list_head cached_devs;
uint64_t cached_dev_sectors;
struct closure caching;
@@ -852,7 +862,7 @@ static inline void wake_up_allocators(struct cache_set *c)
/* Forward declarations */
-void bch_count_io_errors(struct cache *, blk_status_t, const char *);
+void bch_count_io_errors(struct cache *, blk_status_t, int, const char *);
void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
blk_status_t, const char *);
void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 81e8dc3..bf3a48a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -419,7 +419,7 @@ static void do_btree_node_write(struct btree *b)
SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
bset_sector_offset(&b->keys, i));
- if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
+ if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
int j;
struct bio_vec *bv;
void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
@@ -432,6 +432,7 @@ static void do_btree_node_write(struct btree *b)
continue_at(cl, btree_node_write_done, NULL);
} else {
+		/* Safe even with multipage bvecs, since the bio was just allocated */
b->bio->bi_vcnt = 0;
bch_bio_map(b->bio, i);
@@ -1678,7 +1679,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
/* don't reclaim buckets to which writeback keys point */
rcu_read_lock();
- for (i = 0; i < c->nr_uuids; i++) {
+ for (i = 0; i < c->devices_max_used; i++) {
struct bcache_device *d = c->devices[i];
struct cached_dev *dc;
struct keybuf_key *w, *n;
@@ -1803,10 +1804,7 @@ static int bch_gc_thread(void *arg)
int bch_gc_thread_start(struct cache_set *c)
{
c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
- if (IS_ERR(c->gc_thread))
- return PTR_ERR(c->gc_thread);
-
- return 0;
+ return PTR_ERR_OR_ZERO(c->gc_thread);
}
/* Initial partial gc */
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 1841d03..7f12920 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -8,6 +8,7 @@
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
+#include <linux/sched/debug.h>
#include "closure.h"
@@ -18,10 +19,6 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
BUG_ON(flags & CLOSURE_GUARD_MASK);
BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
- /* Must deliver precisely one wakeup */
- if (r == 1 && (flags & CLOSURE_SLEEPING))
- wake_up_process(cl->task);
-
if (!r) {
if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
atomic_set(&cl->remaining,
@@ -100,28 +97,34 @@ bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
}
EXPORT_SYMBOL(closure_wait);
-/**
- * closure_sync - sleep until a closure has nothing left to wait on
- *
- * Sleeps until the refcount hits 1 - the thread that's running the closure owns
- * the last refcount.
- */
-void closure_sync(struct closure *cl)
+struct closure_syncer {
+ struct task_struct *task;
+ int done;
+};
+
+static void closure_sync_fn(struct closure *cl)
{
- while (1) {
- __closure_start_sleep(cl);
- closure_set_ret_ip(cl);
+ cl->s->done = 1;
+ wake_up_process(cl->s->task);
+}
- if ((atomic_read(&cl->remaining) &
- CLOSURE_REMAINING_MASK) == 1)
- break;
+void __sched __closure_sync(struct closure *cl)
+{
+ struct closure_syncer s = { .task = current };
+ cl->s = &s;
+ continue_at(cl, closure_sync_fn, NULL);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (s.done)
+ break;
schedule();
}
- __closure_end_sleep(cl);
+ __set_current_state(TASK_RUNNING);
}
-EXPORT_SYMBOL(closure_sync);
+EXPORT_SYMBOL(__closure_sync);
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
@@ -168,12 +171,10 @@ static int debug_seq_show(struct seq_file *f, void *data)
cl, (void *) cl->ip, cl->fn, cl->parent,
r & CLOSURE_REMAINING_MASK);
- seq_printf(f, "%s%s%s%s\n",
+ seq_printf(f, "%s%s\n",
test_bit(WORK_STRUCT_PENDING_BIT,
work_data_bits(&cl->work)) ? "Q" : "",
- r & CLOSURE_RUNNING ? "R" : "",
- r & CLOSURE_STACK ? "S" : "",
- r & CLOSURE_SLEEPING ? "Sl" : "");
+ r & CLOSURE_RUNNING ? "R" : "");
if (r & CLOSURE_WAITING)
seq_printf(f, " W %pF\n",
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index ccfbea6..3b9dfc9 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -103,6 +103,7 @@
*/
struct closure;
+struct closure_syncer;
typedef void (closure_fn) (struct closure *);
struct closure_waitlist {
@@ -115,10 +116,6 @@ enum closure_state {
* the thread that owns the closure, and cleared by the thread that's
* waking up the closure.
*
- * CLOSURE_SLEEPING: Must be set before a thread uses a closure to sleep
- * - indicates that cl->task is valid and closure_put() may wake it up.
- * Only set or cleared by the thread that owns the closure.
- *
* The rest are for debugging and don't affect behaviour:
*
* CLOSURE_RUNNING: Set when a closure is running (i.e. by
@@ -128,22 +125,16 @@ enum closure_state {
* continue_at() and closure_return() clear it for you, if you're doing
* something unusual you can use closure_set_dead() which also helps
* annotate where references are being transferred.
- *
- * CLOSURE_STACK: Sanity check - remaining should never hit 0 on a
- * closure with this flag set
*/
- CLOSURE_BITS_START = (1 << 23),
- CLOSURE_DESTRUCTOR = (1 << 23),
- CLOSURE_WAITING = (1 << 25),
- CLOSURE_SLEEPING = (1 << 27),
- CLOSURE_RUNNING = (1 << 29),
- CLOSURE_STACK = (1 << 31),
+ CLOSURE_BITS_START = (1U << 26),
+ CLOSURE_DESTRUCTOR = (1U << 26),
+ CLOSURE_WAITING = (1U << 28),
+ CLOSURE_RUNNING = (1U << 30),
};
#define CLOSURE_GUARD_MASK \
- ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_SLEEPING| \
- CLOSURE_RUNNING|CLOSURE_STACK) << 1)
+ ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_RUNNING) << 1)
#define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1)
#define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING)
@@ -152,7 +143,7 @@ struct closure {
union {
struct {
struct workqueue_struct *wq;
- struct task_struct *task;
+ struct closure_syncer *s;
struct llist_node list;
closure_fn *fn;
};
@@ -178,7 +169,19 @@ void closure_sub(struct closure *cl, int v);
void closure_put(struct closure *cl);
void __closure_wake_up(struct closure_waitlist *list);
bool closure_wait(struct closure_waitlist *list, struct closure *cl);
-void closure_sync(struct closure *cl);
+void __closure_sync(struct closure *cl);
+
+/**
+ * closure_sync - sleep until a closure has nothing left to wait on
+ *
+ * Sleeps until the refcount hits 1 - the thread that's running the closure owns
+ * the last refcount.
+ */
+static inline void closure_sync(struct closure *cl)
+{
+ if ((atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK) != 1)
+ __closure_sync(cl);
+}
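/*
 * The inline fast path above skips the scheduler round-trip when the
 * caller already holds the last reference. A rough userspace analogue
 * with C11 atomics (names hypothetical; the kernel parks the task where
 * this model merely spins):
 */
#include <stdatomic.h>

struct model_closure {
	atomic_int remaining;
};

static void model_slow_sync(struct model_closure *cl)
{
	while (atomic_load(&cl->remaining) != 1)
		;	/* stand-in for sleeping until woken */
}

static void model_sync(struct model_closure *cl)
{
	/* remaining == 1: only our own ref is left, nothing to wait on */
	if (atomic_load(&cl->remaining) != 1)
		model_slow_sync(cl);
}

int main(void)
{
	struct model_closure cl;

	atomic_init(&cl.remaining, 1);
	model_sync(&cl);	/* takes the fast path, returns at once */
	return 0;
}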
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
@@ -215,24 +218,6 @@ static inline void closure_set_waiting(struct closure *cl, unsigned long f)
#endif
}
-static inline void __closure_end_sleep(struct closure *cl)
-{
- __set_current_state(TASK_RUNNING);
-
- if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING)
- atomic_sub(CLOSURE_SLEEPING, &cl->remaining);
-}
-
-static inline void __closure_start_sleep(struct closure *cl)
-{
- closure_set_ip(cl);
- cl->task = current;
- set_current_state(TASK_UNINTERRUPTIBLE);
-
- if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
- atomic_add(CLOSURE_SLEEPING, &cl->remaining);
-}
-
static inline void closure_set_stopped(struct closure *cl)
{
atomic_sub(CLOSURE_RUNNING, &cl->remaining);
@@ -241,7 +226,6 @@ static inline void closure_set_stopped(struct closure *cl)
static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
struct workqueue_struct *wq)
{
- BUG_ON(object_is_on_stack(cl));
closure_set_ip(cl);
cl->fn = fn;
cl->wq = wq;
@@ -300,7 +284,7 @@ static inline void closure_init(struct closure *cl, struct closure *parent)
static inline void closure_init_stack(struct closure *cl)
{
memset(cl, 0, sizeof(struct closure));
- atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
}
/**
@@ -322,6 +306,8 @@ static inline void closure_wake_up(struct closure_waitlist *list)
* This is because after calling continue_at() you no longer have a ref on @cl,
* and whatever @cl owns may be freed out from under you - a running closure fn
* has a ref on its own closure which continue_at() drops.
+ *
+ * Note you are expected to immediately return after using this macro.
*/
#define continue_at(_cl, _fn, _wq) \
do { \
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index c7a02c4..af89408 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -116,7 +116,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
return;
check->bi_opf = REQ_OP_READ;
- if (bio_alloc_pages(check, GFP_NOIO))
+ if (bch_bio_alloc_pages(check, GFP_NOIO))
goto out_put;
submit_bio_wait(check);
@@ -251,8 +251,7 @@ void bch_debug_exit(void)
int __init bch_debug_init(struct kobject *kobj)
{
- int ret = 0;
-
debug = debugfs_create_dir("bcache", NULL);
- return ret;
+
+ return IS_ERR_OR_NULL(debug);
}
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index fac97ec..a783c5a 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -51,7 +51,10 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
/* IO errors */
-void bch_count_io_errors(struct cache *ca, blk_status_t error, const char *m)
+void bch_count_io_errors(struct cache *ca,
+ blk_status_t error,
+ int is_read,
+ const char *m)
{
/*
* The halflife of an error is:
@@ -94,8 +97,9 @@ void bch_count_io_errors(struct cache *ca, blk_status_t error, const char *m)
errors >>= IO_ERROR_SHIFT;
if (errors < ca->set->error_limit)
- pr_err("%s: IO error on %s, recovering",
- bdevname(ca->bdev, buf), m);
+ pr_err("%s: IO error on %s%s",
+ bdevname(ca->bdev, buf), m,
+ is_read ? ", recovering." : ".");
else
bch_cache_set_error(ca->set,
"%s: too many IO errors %s",
@@ -108,6 +112,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
{
struct bbio *b = container_of(bio, struct bbio, bio);
struct cache *ca = PTR_CACHE(c, &b->key, 0);
+ int is_read = (bio_data_dir(bio) == READ ? 1 : 0);
unsigned threshold = op_is_write(bio_op(bio))
? c->congested_write_threshold_us
@@ -129,7 +134,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
atomic_inc(&c->congested);
}
- bch_count_io_errors(ca, error, m);
+ bch_count_io_errors(ca, error, is_read, m);
}
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index d50c1c9..a24c3a9 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -162,7 +162,7 @@ static void read_moving(struct cache_set *c)
bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio->bi_end_io = read_moving_endio;
- if (bio_alloc_pages(bio, GFP_KERNEL))
+ if (bch_bio_alloc_pages(bio, GFP_KERNEL))
goto err;
trace_bcache_gc_copy(&w->key);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 643c3021..1a46b41 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -576,6 +576,7 @@ static void cache_lookup(struct closure *cl)
{
struct search *s = container_of(cl, struct search, iop.cl);
struct bio *bio = &s->bio.bio;
+ struct cached_dev *dc;
int ret;
bch_btree_op_init(&s->op, -1);
@@ -588,6 +589,27 @@ static void cache_lookup(struct closure *cl)
return;
}
+ /*
+	 * We might encounter an error while searching the btree. If that
+	 * happens, ret will be negative. In this scenario we should not
+	 * recover data from the backing device (when the cache device is
+	 * dirty), because we don't know whether the bkeys covered by the
+	 * read request are all clean.
+	 *
+	 * If that happens, s->iop.status still holds its initial value
+	 * from before s->bio.bio was submitted.
+ */
+ if (ret < 0) {
+ BUG_ON(ret == -EINTR);
+ if (s->d && s->d->c &&
+ !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
+ dc = container_of(s->d, struct cached_dev, disk);
+ if (dc && atomic_read(&dc->has_dirty))
+ s->recoverable = false;
+ }
+ if (!s->iop.status)
+ s->iop.status = BLK_STS_IOERR;
+ }
+
closure_return(cl);
}
@@ -611,8 +633,8 @@ static void request_endio(struct bio *bio)
static void bio_complete(struct search *s)
{
if (s->orig_bio) {
- struct request_queue *q = s->orig_bio->bi_disk->queue;
- generic_end_io_acct(q, bio_data_dir(s->orig_bio),
+ generic_end_io_acct(s->d->disk->queue,
+ bio_data_dir(s->orig_bio),
&s->d->disk->part0, s->start_time);
trace_bcache_request_end(s->d, s->orig_bio);
@@ -841,7 +863,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
cache_bio->bi_private = &s->cl;
bch_bio_map(cache_bio, NULL);
- if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
+ if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
goto out_put;
if (reada)
@@ -974,6 +996,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
int rw = bio_data_dir(bio);
+ atomic_set(&dc->backing_idle, 0);
generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
bio_set_dev(bio, dc->bdev);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index b4d2892..133b812 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -211,7 +211,7 @@ static void write_bdev_super_endio(struct bio *bio)
static void __write_super(struct cache_sb *sb, struct bio *bio)
{
- struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
+ struct cache_sb *out = page_address(bio_first_page_all(bio));
unsigned i;
bio->bi_iter.bi_sector = SB_SECTOR;
@@ -274,7 +274,9 @@ static void write_super_endio(struct bio *bio)
{
struct cache *ca = bio->bi_private;
- bch_count_io_errors(ca, bio->bi_status, "writing superblock");
+ /* is_read = 0 */
+ bch_count_io_errors(ca, bio->bi_status, 0,
+ "writing superblock");
closure_put(&ca->set->sb_write);
}
@@ -721,6 +723,9 @@ static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
d->c = c;
c->devices[id] = d;
+ if (id >= c->devices_max_used)
+ c->devices_max_used = id + 1;
+
closure_get(&c->caching);
}
@@ -906,6 +911,12 @@ static void cached_dev_detach_finish(struct work_struct *w)
mutex_lock(&bch_register_lock);
+ cancel_delayed_work_sync(&dc->writeback_rate_update);
+ if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
+ kthread_stop(dc->writeback_thread);
+ dc->writeback_thread = NULL;
+ }
+
memset(&dc->sb.set_uuid, 0, 16);
SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
@@ -1166,7 +1177,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
dc->bdev->bd_holder = dc;
bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
- dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
+ bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
get_page(sb_page);
if (cached_dev_init(dc, sb->block_size << 9))
@@ -1261,7 +1272,7 @@ static int flash_devs_run(struct cache_set *c)
struct uuid_entry *u;
for (u = c->uuids;
- u < c->uuids + c->nr_uuids && !ret;
+ u < c->uuids + c->devices_max_used && !ret;
u++)
if (UUID_FLASH_ONLY(u))
ret = flash_dev_run(c, u);
@@ -1427,7 +1438,7 @@ static void __cache_set_unregister(struct closure *cl)
mutex_lock(&bch_register_lock);
- for (i = 0; i < c->nr_uuids; i++)
+ for (i = 0; i < c->devices_max_used; i++)
if (c->devices[i]) {
if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
@@ -1490,7 +1501,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
c->bucket_bits = ilog2(sb->bucket_size);
c->block_bits = ilog2(sb->block_size);
c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);
-
+ c->devices_max_used = 0;
c->btree_pages = bucket_pages(c);
if (c->btree_pages > BTREE_MAX_PAGES)
c->btree_pages = max_t(int, c->btree_pages / 4,
@@ -1810,7 +1821,7 @@ void bch_cache_release(struct kobject *kobj)
free_fifo(&ca->free[i]);
if (ca->sb_bio.bi_inline_vecs[0].bv_page)
- put_page(ca->sb_bio.bi_io_vec[0].bv_page);
+ put_page(bio_first_page_all(&ca->sb_bio));
if (!IS_ERR_OR_NULL(ca->bdev))
blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
@@ -1864,7 +1875,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
ca->bdev->bd_holder = ca;
bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
- ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
+ bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
get_page(sb_page);
if (blk_queue_discard(bdev_get_queue(ca->bdev)))
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index e548b8b..a23cd6a 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -249,6 +249,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
: 0;
}
+/*
+ * Generally it isn't good to access .bi_io_vec and .bi_vcnt directly;
+ * the preferred way is bio_add_page(). But in this case bch_bio_map()
+ * assumes that the bvec table is empty, so it is safe to access
+ * .bi_vcnt & .bi_io_vec this way even after multipage bvecs are
+ * supported.
+ */
void bch_bio_map(struct bio *bio, void *base)
{
size_t size = bio->bi_iter.bi_size;
@@ -276,6 +283,33 @@ start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,
}
}
+/**
+ * bch_bio_alloc_pages - allocates a single page for each bvec in a bio
+ * @bio: bio to allocate pages for
+ * @gfp_mask: flags for allocation
+ *
+ * Allocates pages up to @bio->bi_vcnt.
+ *
+ * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
+ * freed.
+ */
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
+{
+ int i;
+ struct bio_vec *bv;
+
+ bio_for_each_segment_all(bv, bio, i) {
+ bv->bv_page = alloc_page(gfp_mask);
+ if (!bv->bv_page) {
+ while (--bv >= bio->bi_io_vec)
+ __free_page(bv->bv_page);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
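/*
 * The unwind in bch_bio_alloc_pages() is the usual all-or-nothing
 * allocation pattern: on the first failure, free everything already
 * allocated before reporting the error. A generic standalone version:
 */
#include <stdlib.h>

static int alloc_all(void **slots, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		slots[i] = malloc(4096);
		if (!slots[i]) {
			while (--i >= 0)	/* roll back what we got */
				free(slots[i]);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	void *slots[16];
	int i;

	if (alloc_all(slots, 16) == 0)
		for (i = 0; i < 16; i++)
			free(slots[i]);
	return 0;
}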
/*
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group (Any
* use permitted, subject to terms of PostgreSQL license; see.)
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index ed5e8a4..4df4c5c 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -558,6 +558,7 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
}
void bch_bio_map(struct bio *bio, void *base);
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask);
static inline sector_t bdev_sectors(struct block_device *bdev)
{
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 56a3788..51306a1 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -18,17 +18,39 @@
#include <trace/events/bcache.h>
/* Rate limiting */
-
-static void __update_writeback_rate(struct cached_dev *dc)
+static uint64_t __calc_target_rate(struct cached_dev *dc)
{
struct cache_set *c = dc->disk.c;
+
+ /*
+ * This is the size of the cache, minus the amount used for
+ * flash-only devices
+ */
uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
bcache_flash_devs_sectors_dirty(c);
+
+ /*
+ * Unfortunately there is no control of global dirty data. If the
+ * user states that they want 10% dirty data in the cache, and has,
+ * e.g., 5 backing volumes of equal size, we try and ensure each
+ * backing volume uses about 2% of the cache for dirty data.
+ */
+ uint32_t bdev_share =
+ div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
+ c->cached_dev_sectors);
+
uint64_t cache_dirty_target =
div_u64(cache_sectors * dc->writeback_percent, 100);
- int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
- c->cached_dev_sectors);
+ /* Ensure each backing dev gets at least one dirty share */
+ if (bdev_share < 1)
+ bdev_share = 1;
+
+ return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
+}
+
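/*
 * A worked example of the share computation above, treating
 * WRITEBACK_SHARE_SHIFT as a fixed-point scale (14 bits assumed here):
 * two equal backing volumes on a cache with a 10% dirty target each end
 * up with about a 5% share.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned shift = 14;			/* assumed scale */
	uint64_t cache_sectors = 1000000;
	uint64_t cached_dev_sectors = 2000000;		/* two equal bdevs */
	uint64_t bdev = 1000000;

	uint64_t dirty_target = cache_sectors * 10 / 100;
	uint64_t share = (bdev << shift) / cached_dev_sectors;

	if (share < 1)
		share = 1;	/* every backing dev gets at least one share */

	printf("per-bdev target: %llu sectors\n",
	       (unsigned long long)((dirty_target * share) >> shift));
	return 0;
}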
+static void __update_writeback_rate(struct cached_dev *dc)
+{
/*
* PI controller:
* Figures out the amount that should be written per second.
@@ -49,6 +71,7 @@ static void __update_writeback_rate(struct cached_dev *dc)
* This acts as a slow, long-term average that is not subject to
* variations in usage like the p term.
*/
+ int64_t target = __calc_target_rate(dc);
int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
int64_t error = dirty - target;
int64_t proportional_scaled =
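/*
 * A generic sketch of the PI scheme described above, with made-up gains:
 * the proportional term reacts to the current error, the integral term
 * tracks long-term drift, and their sum becomes the writeback rate.
 */
#include <stdint.h>

struct pi_state {
	int64_t integral;	/* slow, long-term accumulated error */
};

static int64_t pi_step(struct pi_state *s, int64_t dirty, int64_t target)
{
	int64_t error = dirty - target;
	int64_t proportional = error / 16;	/* hypothetical P gain */
	int64_t rate;

	s->integral += error / 256;		/* hypothetical I gain */

	rate = proportional + s->integral;
	return rate > 0 ? rate : 0;		/* never a negative rate */
}

int main(void)
{
	struct pi_state s = { 0 };

	/* dirty above target yields a positive writeback rate */
	return pi_step(&s, 1200, 1000) > 0 ? 0 : 1;
}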
@@ -116,6 +139,7 @@ static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
struct dirty_io {
struct closure cl;
struct cached_dev *dc;
+ uint16_t sequence;
struct bio bio;
};
@@ -194,6 +218,27 @@ static void write_dirty(struct closure *cl)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
struct keybuf_key *w = io->bio.bi_private;
+ struct cached_dev *dc = io->dc;
+
+ uint16_t next_sequence;
+
+ if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
+ /* Not our turn to write; wait for a write to complete */
+ closure_wait(&dc->writeback_ordering_wait, cl);
+
+ if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
+ /*
+			 * Edge case: it happened in indeterminate order
+			 * relative to when we were added to the wait list.
+ */
+ closure_wake_up(&dc->writeback_ordering_wait);
+ }
+
+ continue_at(cl, write_dirty, io->dc->writeback_write_wq);
+ return;
+ }
+
+ next_sequence = io->sequence + 1;
/*
* IO errors are signalled using the dirty bit on the key.
@@ -211,6 +256,9 @@ static void write_dirty(struct closure *cl)
closure_bio_submit(&io->bio, cl);
}
+ atomic_set(&dc->writeback_sequence_next, next_sequence);
+ closure_wake_up(&dc->writeback_ordering_wait);
+
continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
@@ -219,8 +267,10 @@ static void read_dirty_endio(struct bio *bio)
struct keybuf_key *w = bio->bi_private;
struct dirty_io *io = w->private;
+ /* is_read = 1 */
bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
- bio->bi_status, "reading dirty data from cache");
+ bio->bi_status, 1,
+ "reading dirty data from cache");
dirty_endio(bio);
}
@@ -237,10 +287,15 @@ static void read_dirty_submit(struct closure *cl)
static void read_dirty(struct cached_dev *dc)
{
unsigned delay = 0;
- struct keybuf_key *w;
+ struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
+ size_t size;
+ int nk, i;
struct dirty_io *io;
struct closure cl;
+ uint16_t sequence = 0;
+ BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
+ atomic_set(&dc->writeback_sequence_next, sequence);
closure_init_stack(&cl);
/*
@@ -248,45 +303,109 @@ static void read_dirty(struct cached_dev *dc)
* mempools.
*/
- while (!kthread_should_stop()) {
-
- w = bch_keybuf_next(&dc->writeback_keys);
- if (!w)
- break;
-
- BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));
-
- if (KEY_START(&w->key) != dc->last_read ||
- jiffies_to_msecs(delay) > 50)
- while (!kthread_should_stop() && delay)
- delay = schedule_timeout_interruptible(delay);
-
- dc->last_read = KEY_OFFSET(&w->key);
-
- io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
- * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
- GFP_KERNEL);
- if (!io)
- goto err;
-
- w->private = io;
- io->dc = dc;
-
- dirty_init(w);
- bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
- io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
- bio_set_dev(&io->bio, PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
- io->bio.bi_end_io = read_dirty_endio;
-
- if (bio_alloc_pages(&io->bio, GFP_KERNEL))
- goto err_free;
+ next = bch_keybuf_next(&dc->writeback_keys);
+
+ while (!kthread_should_stop() && next) {
+ size = 0;
+ nk = 0;
+
+ do {
+ BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));
+
+ /*
+ * Don't combine too many operations, even if they
+ * are all small.
+ */
+ if (nk >= MAX_WRITEBACKS_IN_PASS)
+ break;
+
+ /*
+ * If the current operation is very large, don't
+ * further combine operations.
+ */
+ if (size >= MAX_WRITESIZE_IN_PASS)
+ break;
+
+ /*
+ * Operations are only eligible to be combined
+ * if they are contiguous.
+ *
+ * TODO: add a heuristic allowing a certain
+ * amount of non-contiguous IO per pass,
+ * so that we can benefit from backing device
+ * command queueing.
+ */
+ if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
+ &START_KEY(&next->key)))
+ break;
+
+ size += KEY_SIZE(&next->key);
+ keys[nk++] = next;
+ } while ((next = bch_keybuf_next(&dc->writeback_keys)));
+
+ /* Now we have gathered a set of 1..5 keys to write back. */
+ for (i = 0; i < nk; i++) {
+ w = keys[i];
+
+ io = kzalloc(sizeof(struct dirty_io) +
+ sizeof(struct bio_vec) *
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
+ GFP_KERNEL);
+ if (!io)
+ goto err;
+
+ w->private = io;
+ io->dc = dc;
+ io->sequence = sequence++;
+
+ dirty_init(w);
+ bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
+ io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
+ bio_set_dev(&io->bio,
+ PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
+ io->bio.bi_end_io = read_dirty_endio;
+
+ if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
+ goto err_free;
+
+ trace_bcache_writeback(&w->key);
+
+ down(&dc->in_flight);
+
+ /* We've acquired a semaphore limiting the maximum
+ * number of simultaneous writebacks; from here
+ * everything happens asynchronously.
+ */
+ closure_call(&io->cl, read_dirty_submit, NULL, &cl);
+ }
- trace_bcache_writeback(&w->key);
+ delay = writeback_delay(dc, size);
- down(&dc->in_flight);
- closure_call(&io->cl, read_dirty_submit, NULL, &cl);
+ /* If the control system would wait for at least half a
+ * second, and there have been no requests hitting the backing
+ * disk for a while: use an alternate mode where we have at most
+ * one contiguous set of writebacks in flight at a time. If
+ * someone wants to do IO it will be quick, as it will only
+ * have to contend with one operation in flight, and we'll
+ * be round-tripping data to the backing disk as quickly as
+ * it can accept it.
+ */
+ if (delay >= HZ / 2) {
+ /* 3 means at least 1.5 seconds, up to 7.5 if we
+ * have slowed way down.
+ */
+ if (atomic_inc_return(&dc->backing_idle) >= 3) {
+ /* Wait for current I/Os to finish */
+ closure_sync(&cl);
+ /* And immediately launch a new set. */
+ delay = 0;
+ }
+ }
- delay = writeback_delay(dc, KEY_SIZE(&w->key));
+ while (!kthread_should_stop() && delay) {
+ schedule_timeout_interruptible(delay);
+ delay = writeback_delay(dc, 0);
+ }
}
if (0) {
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index a9e3ffb..66f1c52 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -5,6 +5,16 @@
#define CUTOFF_WRITEBACK 40
#define CUTOFF_WRITEBACK_SYNC 70
+#define MAX_WRITEBACKS_IN_PASS 5
+#define MAX_WRITESIZE_IN_PASS 5000 /* in 512B sectors */
+
+/*
+ * A shift of 14 (i.e. units of 1/16384) is chosen so that each
+ * backing device's share keeps reasonable precision, and the
+ * arithmetic does not overflow until individual backing devices
+ * approach a petabyte.
+ */
+#define WRITEBACK_SHARE_SHIFT 14
+
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
uint64_t i, ret = 0;
@@ -21,7 +31,7 @@ static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
mutex_lock(&bch_register_lock);
- for (i = 0; i < c->nr_uuids; i++) {
+ for (i = 0; i < c->devices_max_used; i++) {
struct bcache_device *d = c->devices[i];
if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
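The share arithmetic in __calc_target_rate() is plain fixed-point: the backing device's fraction of all cached devices is scaled up by WRITEBACK_SHARE_SHIFT, multiplied into the global dirty target, then scaled back down. Here is a standalone sketch with made-up sizes (the kernel uses div_u64/div64_u64 helpers because 64-bit division is not native on 32-bit architectures):

#include <stdint.h>
#include <stdio.h>

#define WRITEBACK_SHARE_SHIFT 14

/* Sketch of __calc_target_rate()'s arithmetic with made-up numbers. */
int main(void)
{
	uint64_t cache_sectors = 1ULL << 30;       /* ~512 GiB cache */
	uint64_t cached_dev_sectors = 5ULL << 28;  /* five equal backing devs */
	uint64_t bdev_sectors = 1ULL << 28;        /* this backing dev */
	unsigned writeback_percent = 10;

	/* Share of the cache this device should get, in 1/16384ths. */
	uint64_t bdev_share =
		(bdev_sectors << WRITEBACK_SHARE_SHIFT) / cached_dev_sectors;
	if (bdev_share < 1)
		bdev_share = 1;	/* every device gets at least one share */

	uint64_t cache_dirty_target = cache_sectors * writeback_percent / 100;
	uint64_t target =
		(cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;

	/* 10% of the cache, split 5 ways: each device targets ~2%. */
	printf("share=%llu/16384 target=%llu sectors\n",
	       (unsigned long long)bdev_share, (unsigned long long)target);
	return 0;
}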
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index b8ac591a..c546b56 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1611,7 +1611,8 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
int l;
struct dm_buffer *b, *tmp;
unsigned long freed = 0;
- unsigned long count = nr_to_scan;
+ unsigned long count = c->n_buffers[LIST_CLEAN] +
+ c->n_buffers[LIST_DIRTY];
unsigned long retain_target = get_retain_buffers(c);
for (l = 0; l < LIST_SIZE; l++) {
@@ -1647,8 +1648,11 @@ static unsigned long
dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
+ unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
+ READ_ONCE(c->n_buffers[LIST_DIRTY]);
+ unsigned long retain_target = get_retain_buffers(c);
- return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]);
+ return (count < retain_target) ? 0 : (count - retain_target);
}
/*
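The dm-bufio change makes the shrinker honest about reclaimable objects: dm_bufio_shrink_count() now subtracts the retain target up front instead of reporting buffers that __scan() would refuse to free. A sketch of the clamp (illustrative names, not the dm-bufio API):

#include <stdio.h>

/*
 * Report only buffers beyond the retain target as reclaimable, so the
 * shrinker never asks the scan path to free buffers it would refuse
 * to touch anyway.
 */
static unsigned long shrink_count(unsigned long n_clean,
				  unsigned long n_dirty,
				  unsigned long retain_target)
{
	unsigned long count = n_clean + n_dirty;

	return (count < retain_target) ? 0 : (count - retain_target);
}

int main(void)
{
	printf("%lu\n", shrink_count(100, 50, 200));	/* 0: nothing spare */
	printf("%lu\n", shrink_count(300, 50, 200));	/* 150 reclaimable */
	return 0;
}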
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index cf23a14..47407e4 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3472,18 +3472,18 @@ static int __init dm_cache_init(void)
{
int r;
- r = dm_register_target(&cache_target);
- if (r) {
- DMERR("cache target registration failed: %d", r);
- return r;
- }
-
migration_cache = KMEM_CACHE(dm_cache_migration, 0);
if (!migration_cache) {
dm_unregister_target(&cache_target);
return -ENOMEM;
}
+ r = dm_register_target(&cache_target);
+ if (r) {
+ DMERR("cache target registration failed: %d", r);
+ return r;
+ }
+
return 0;
}
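This and several later patches in the series (dm-mpath, dm-snap, dm-thin) apply the same reordering: allocate every resource the target's constructor may touch before dm_register_target(), so a racing table load cannot observe a half-initialized target, and unwind in reverse order on failure. A standalone sketch of the pattern with stub names:

#include <stdio.h>
#include <stdlib.h>

/* Stub stand-ins for kmem caches and dm targets (illustrative only). */
struct cache { int dummy; };
struct target { const char *name; };

static struct cache *create_cache(void) { return malloc(sizeof(struct cache)); }
static void destroy_cache(struct cache *c) { free(c); }
static int register_target(struct target *t) { printf("registered %s\n", t->name); return 0; }

static struct cache *my_cache;
static struct target my_target = { .name = "example" };

/*
 * Allocate every resource the target's ctr may touch first, register
 * the target last, and unwind in reverse order on failure.
 */
static int example_init(void)
{
	int r = -1;

	my_cache = create_cache();
	if (!my_cache)
		return r;

	r = register_target(&my_target);
	if (r)
		goto bad_cache;	/* nothing registered yet; just free */

	return 0;

bad_cache:
	destroy_cache(my_cache);
	return r;
}

int main(void) { return example_init(); }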
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 9fc12f5..2ad4291 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1446,7 +1446,6 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
bio_for_each_segment_all(bv, clone, i) {
BUG_ON(!bv->bv_page);
mempool_free(bv->bv_page, cc->page_pool);
- bv->bv_page = NULL;
}
}
@@ -1954,10 +1953,15 @@ static int crypt_setkey(struct crypt_config *cc)
/* Ignore extra keys (which are used for IV etc) */
subkey_size = crypt_subkey_size(cc);
- if (crypt_integrity_hmac(cc))
+ if (crypt_integrity_hmac(cc)) {
+ if (subkey_size < cc->key_mac_size)
+ return -EINVAL;
+
crypt_copy_authenckey(cc->authenc_key, cc->key,
subkey_size - cc->key_mac_size,
cc->key_mac_size);
+ }
+
for (i = 0; i < cc->tfms_count; i++) {
if (crypt_integrity_hmac(cc))
r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
@@ -2053,9 +2057,6 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
ret = crypt_setkey(cc);
- /* wipe the kernel key payload copy in each case */
- memset(cc->key, 0, cc->key_size * sizeof(u8));
-
if (!ret) {
set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
kzfree(cc->key_string);
@@ -2523,6 +2524,10 @@ static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
}
}
+ /* wipe the kernel key payload copy */
+ if (cc->key_string)
+ memset(cc->key, 0, cc->key_size * sizeof(u8));
+
return ret;
}
@@ -2740,6 +2745,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
cc->tag_pool_max_sectors * cc->on_disk_tag_size);
if (!cc->tag_pool) {
ti->error = "Cannot allocate integrity tags mempool";
+ ret = -ENOMEM;
goto bad;
}
@@ -2961,6 +2967,9 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
return ret;
if (cc->iv_gen_ops && cc->iv_gen_ops->init)
ret = cc->iv_gen_ops->init(cc);
+ /* wipe the kernel key payload copy */
+ if (cc->key_string)
+ memset(cc->key, 0, cc->key_size * sizeof(u8));
return ret;
}
if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
@@ -3007,7 +3016,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 18, 0},
+ .version = {1, 18, 1},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
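The new memset() calls scrub the in-core copy of keyring-supplied key material as soon as it has been programmed into the cipher, so the raw key does not linger in cc->key. A userspace sketch of the same hygiene; it assumes glibc's explicit_bzero(), which, unlike a plain memset a compiler may elide, is guaranteed to run:

#define _DEFAULT_SOURCE
#include <string.h>
#include <stdio.h>

/* Illustrative only: load a key, program it, then scrub the copy. */
int main(void)
{
	unsigned char key[32] = "0123456789abcdef0123456789abcdef";

	/* ... hand `key` to the cipher here ... */

	/*
	 * Scrub the local copy once the cipher holds its own; the
	 * kernel patch uses memset on cc->key for the same purpose.
	 */
	explicit_bzero(key, sizeof(key));

	printf("first byte after wipe: %u\n", key[0]);
	return 0;
}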
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 05c7bfd..46d7c87 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2559,7 +2559,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
int r = 0;
unsigned i;
__u64 journal_pages, journal_desc_size, journal_tree_size;
- unsigned char *crypt_data = NULL;
+ unsigned char *crypt_data = NULL, *crypt_iv = NULL;
+ struct skcipher_request *req = NULL;
ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
@@ -2617,9 +2618,20 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
if (blocksize == 1) {
struct scatterlist *sg;
- SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
- unsigned char iv[ivsize];
- skcipher_request_set_tfm(req, ic->journal_crypt);
+
+ req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+ if (!req) {
+ *error = "Could not allocate crypt request";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ crypt_iv = kmalloc(ivsize, GFP_KERNEL);
+ if (!crypt_iv) {
+ *error = "Could not allocate iv";
+ r = -ENOMEM;
+ goto bad;
+ }
ic->journal_xor = dm_integrity_alloc_page_list(ic);
if (!ic->journal_xor) {
@@ -2641,9 +2653,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
sg_set_buf(&sg[i], va, PAGE_SIZE);
}
sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
- memset(iv, 0x00, ivsize);
+ memset(crypt_iv, 0x00, ivsize);
- skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv);
+ skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
init_completion(&comp.comp);
comp.in_flight = (atomic_t)ATOMIC_INIT(1);
if (do_crypt(true, req, &comp))
@@ -2659,10 +2671,22 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
crypto_free_skcipher(ic->journal_crypt);
ic->journal_crypt = NULL;
} else {
- SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
- unsigned char iv[ivsize];
unsigned crypt_len = roundup(ivsize, blocksize);
+ req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+ if (!req) {
+ *error = "Could not allocate crypt request";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ crypt_iv = kmalloc(ivsize, GFP_KERNEL);
+ if (!crypt_iv) {
+ *error = "Could not allocate iv";
+ r = -ENOMEM;
+ goto bad;
+ }
+
crypt_data = kmalloc(crypt_len, GFP_KERNEL);
if (!crypt_data) {
*error = "Unable to allocate crypt data";
@@ -2670,8 +2694,6 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
goto bad;
}
- skcipher_request_set_tfm(req, ic->journal_crypt);
-
ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
if (!ic->journal_scatterlist) {
*error = "Unable to allocate sg list";
@@ -2695,12 +2717,12 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
struct skcipher_request *section_req;
__u32 section_le = cpu_to_le32(i);
- memset(iv, 0x00, ivsize);
+ memset(crypt_iv, 0x00, ivsize);
memset(crypt_data, 0x00, crypt_len);
memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
sg_init_one(&sg, crypt_data, crypt_len);
- skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv);
+ skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
init_completion(&comp.comp);
comp.in_flight = (atomic_t)ATOMIC_INIT(1);
if (do_crypt(true, req, &comp))
@@ -2758,6 +2780,9 @@ retest_commit_id:
}
bad:
kfree(crypt_data);
+ kfree(crypt_iv);
+ skcipher_request_free(req);
+
return r;
}
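The dm-integrity change replaces two on-stack allocations, SKCIPHER_REQUEST_ON_STACK() and a variable-length IV array, with heap allocations released at the single bad: exit label. A standalone sketch of that allocate-on-heap, centralized-cleanup shape (names illustrative):

#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for the crypto request object. */
struct request { int dummy; };

static int create_journal(size_t ivsize)
{
	struct request *req = NULL;
	unsigned char *iv = NULL;
	int r = 0;

	req = malloc(sizeof(*req));	/* was a stack VLA in the old code */
	if (!req) {
		r = -12;	/* -ENOMEM */
		goto bad;
	}

	iv = malloc(ivsize);
	if (!iv) {
		r = -12;
		goto bad;
	}
	memset(iv, 0x00, ivsize);

	/* ... drive the cipher with req and iv ... */

bad:
	/* Single exit: free(NULL) is a no-op, so partial setup is safe. */
	free(iv);
	free(req);
	return r;
}

int main(void) { return create_journal(16) ? 1 : 0; }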
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index c8faa2b..ef57c6d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -458,6 +458,38 @@ do { \
} while (0)
/*
+ * Check whether bios must be queued in the device-mapper core rather
+ * than here in the target.
+ *
+ * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
+ * the same value then we are not between multipath_presuspend()
+ * and multipath_resume() calls and we have no need to check
+ * for the DMF_NOFLUSH_SUSPENDING flag.
+ */
+static bool __must_push_back(struct multipath *m, unsigned long flags)
+{
+ return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
+ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
+ dm_noflush_suspending(m->ti));
+}
+
+/*
+ * The following functions use READ_ONCE to take a consistent
+ * snapshot of m->flags without taking the spinlock.
+ */
+static bool must_push_back_rq(struct multipath *m)
+{
+ unsigned long flags = READ_ONCE(m->flags);
+ return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
+}
+
+static bool must_push_back_bio(struct multipath *m)
+{
+ unsigned long flags = READ_ONCE(m->flags);
+ return __must_push_back(m, flags);
+}
+
+/*
* Map cloned requests (request-based multipath)
*/
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
@@ -478,7 +510,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
pgpath = choose_pgpath(m, nr_bytes);
if (!pgpath) {
- if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+ if (must_push_back_rq(m))
return DM_MAPIO_DELAY_REQUEUE;
dm_report_EIO(m); /* Failed */
return DM_MAPIO_KILL;
@@ -553,7 +585,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
}
if (!pgpath) {
- if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+ if (must_push_back_bio(m))
return DM_MAPIO_REQUEUE;
dm_report_EIO(m);
return DM_MAPIO_KILL;
@@ -651,8 +683,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
(save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
(!save_old_value && queue_if_no_path));
- assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
- queue_if_no_path || dm_noflush_suspending(m->ti));
+ assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
spin_unlock_irqrestore(&m->lock, flags);
if (!queue_if_no_path) {
@@ -1444,21 +1475,6 @@ static void activate_path_work(struct work_struct *work)
activate_or_offline_path(pgpath);
}
-static int noretry_error(blk_status_t error)
-{
- switch (error) {
- case BLK_STS_NOTSUPP:
- case BLK_STS_NOSPC:
- case BLK_STS_TARGET:
- case BLK_STS_NEXUS:
- case BLK_STS_MEDIUM:
- return 1;
- }
-
- /* Anything else could be a path failure, so should be retried */
- return 0;
-}
-
static int multipath_end_io(struct dm_target *ti, struct request *clone,
blk_status_t error, union map_info *map_context)
{
@@ -1477,7 +1493,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
* request into dm core, which will remake a clone request and
* clone bios for it and resubmit it later.
*/
- if (error && !noretry_error(error)) {
+ if (error && blk_path_error(error)) {
struct multipath *m = ti->private;
r = DM_ENDIO_REQUEUE;
@@ -1486,7 +1502,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
fail_path(pgpath);
if (atomic_read(&m->nr_valid_paths) == 0 &&
- !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+ !must_push_back_rq(m)) {
if (error == BLK_STS_IOERR)
dm_report_EIO(m);
/* complete with the original error */
@@ -1513,7 +1529,7 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
unsigned long flags;
int r = DM_ENDIO_DONE;
- if (!*error || noretry_error(*error))
+ if (!*error || !blk_path_error(*error))
goto done;
if (pgpath)
@@ -1521,8 +1537,12 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
if (atomic_read(&m->nr_valid_paths) == 0 &&
!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
- dm_report_EIO(m);
- *error = BLK_STS_IOERR;
+ if (must_push_back_bio(m)) {
+ r = DM_ENDIO_REQUEUE;
+ } else {
+ dm_report_EIO(m);
+ *error = BLK_STS_IOERR;
+ }
goto done;
}
@@ -1957,13 +1977,6 @@ static int __init dm_multipath_init(void)
{
int r;
- r = dm_register_target(&multipath_target);
- if (r < 0) {
- DMERR("request-based register failed %d", r);
- r = -EINVAL;
- goto bad_register_target;
- }
-
kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
if (!kmultipathd) {
DMERR("failed to create workqueue kmpathd");
@@ -1985,13 +1998,20 @@ static int __init dm_multipath_init(void)
goto bad_alloc_kmpath_handlerd;
}
+ r = dm_register_target(&multipath_target);
+ if (r < 0) {
+ DMERR("request-based register failed %d", r);
+ r = -EINVAL;
+ goto bad_register_target;
+ }
+
return 0;
+bad_register_target:
+ destroy_workqueue(kmpath_handlerd);
bad_alloc_kmpath_handlerd:
destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
- dm_unregister_target(&multipath_target);
-bad_register_target:
return r;
}
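must_push_back_rq() and must_push_back_bio() read m->flags exactly once into a local so that both bit tests see one consistent value; with two separate reads the flags could change in between. A compilable sketch using C11 atomics (stronger ordering than the kernel's READ_ONCE, but the snapshot idea is the same):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_IF_NO_PATH	0
#define SAVED_QUEUE_IF_NO_PATH	1

/* Stand-in for m->flags, updated concurrently elsewhere. */
static _Atomic unsigned long m_flags;

static bool test_bit(int bit, unsigned long flags)
{
	return flags & (1UL << bit);
}

static bool must_push_back(void)
{
	/* One snapshot; both tests below see the same value. */
	unsigned long flags = atomic_load(&m_flags);

	return test_bit(QUEUE_IF_NO_PATH, flags) !=
	       test_bit(SAVED_QUEUE_IF_NO_PATH, flags);
}

int main(void)
{
	atomic_store(&m_flags, 1UL << QUEUE_IF_NO_PATH);
	printf("%d\n", must_push_back());	/* 1: the two bits differ */
	return 0;
}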
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 9d32f25..b7d175e 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -395,7 +395,7 @@ static void end_clone_request(struct request *clone, blk_status_t error)
dm_complete_request(tio->orig, error);
}
-static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
+static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
blk_status_t r;
@@ -404,9 +404,10 @@ static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
clone->start_time = jiffies;
r = blk_insert_cloned_request(clone->q, clone);
- if (r)
+ if (r != BLK_STS_OK && r != BLK_STS_RESOURCE)
/* must complete clone in terms of original request */
dm_complete_request(rq, r);
+ return r;
}
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
@@ -476,8 +477,10 @@ static int map_request(struct dm_rq_target_io *tio)
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
struct request *clone = NULL;
+ blk_status_t ret;
r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
+check_again:
switch (r) {
case DM_MAPIO_SUBMITTED:
/* The target has taken the I/O to submit by itself later */
@@ -492,7 +495,17 @@ static int map_request(struct dm_rq_target_io *tio)
/* The target has remapped the I/O so dispatch it */
trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
blk_rq_pos(rq));
- dm_dispatch_clone_request(clone, rq);
+ ret = dm_dispatch_clone_request(clone, rq);
+ if (ret == BLK_STS_RESOURCE) {
+ blk_rq_unprep_clone(clone);
+ tio->ti->type->release_clone_rq(clone);
+ tio->clone = NULL;
+ if (!rq->q->mq_ops)
+ r = DM_MAPIO_DELAY_REQUEUE;
+ else
+ r = DM_MAPIO_REQUEUE;
+ goto check_again;
+ }
break;
case DM_MAPIO_REQUEUE:
/* The target wants to requeue the I/O */
@@ -713,8 +726,6 @@ int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
return error;
}
- elv_register_queue(md->queue);
-
return 0;
}
@@ -812,15 +823,8 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
}
dm_init_md_queue(md);
- /* backfill 'mq' sysfs registration normally done in blk_register_queue */
- err = blk_mq_register_dev(disk_to_dev(md->disk), q);
- if (err)
- goto out_cleanup_queue;
-
return 0;
-out_cleanup_queue:
- blk_cleanup_queue(q);
out_tag_set:
blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
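map_request() now inspects the dispatch status: on BLK_STS_RESOURCE it releases the clone and jumps back to requeue the original request instead of losing it. A standalone sketch of the check_again loop (stub statuses; the real code also distinguishes a delayed requeue for non-mq queues):

#include <stdio.h>

enum blk_status { BLK_STS_OK, BLK_STS_RESOURCE, BLK_STS_IOERR };
enum mapio { DM_MAPIO_REMAPPED, DM_MAPIO_REQUEUE, DM_MAPIO_DELAY_REQUEUE };

/* Pretend the queue is briefly out of resources on the first try. */
static enum blk_status dispatch(int attempt)
{
	return attempt == 0 ? BLK_STS_RESOURCE : BLK_STS_OK;
}

/* Sketch of map_request()'s check_again loop (names illustrative). */
static enum mapio map_request(void)
{
	enum mapio r = DM_MAPIO_REMAPPED;
	int attempt = 0;

check_again:
	if (r == DM_MAPIO_REMAPPED) {
		enum blk_status ret = dispatch(attempt++);

		if (ret == BLK_STS_RESOURCE) {
			/* release the clone, then requeue instead of failing */
			r = DM_MAPIO_REQUEUE;
			goto check_again;
		}
	}
	return r;
}

int main(void)
{
	printf("result=%d\n", map_request());	/* 1: DM_MAPIO_REQUEUE */
	return 0;
}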
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 1113b42..a0613bd 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -2411,24 +2411,6 @@ static int __init dm_snapshot_init(void)
return r;
}
- r = dm_register_target(&snapshot_target);
- if (r < 0) {
- DMERR("snapshot target register failed %d", r);
- goto bad_register_snapshot_target;
- }
-
- r = dm_register_target(&origin_target);
- if (r < 0) {
- DMERR("Origin target register failed %d", r);
- goto bad_register_origin_target;
- }
-
- r = dm_register_target(&merge_target);
- if (r < 0) {
- DMERR("Merge target register failed %d", r);
- goto bad_register_merge_target;
- }
-
r = init_origin_hash();
if (r) {
DMERR("init_origin_hash failed.");
@@ -2449,19 +2431,37 @@ static int __init dm_snapshot_init(void)
goto bad_pending_cache;
}
+ r = dm_register_target(&snapshot_target);
+ if (r < 0) {
+ DMERR("snapshot target register failed %d", r);
+ goto bad_register_snapshot_target;
+ }
+
+ r = dm_register_target(&origin_target);
+ if (r < 0) {
+ DMERR("Origin target register failed %d", r);
+ goto bad_register_origin_target;
+ }
+
+ r = dm_register_target(&merge_target);
+ if (r < 0) {
+ DMERR("Merge target register failed %d", r);
+ goto bad_register_merge_target;
+ }
+
return 0;
-bad_pending_cache:
- kmem_cache_destroy(exception_cache);
-bad_exception_cache:
- exit_origin_hash();
-bad_origin_hash:
- dm_unregister_target(&merge_target);
bad_register_merge_target:
dm_unregister_target(&origin_target);
bad_register_origin_target:
dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
+ kmem_cache_destroy(pending_cache);
+bad_pending_cache:
+ kmem_cache_destroy(exception_cache);
+bad_exception_cache:
+ exit_origin_hash();
+bad_origin_hash:
dm_exception_store_exit();
return r;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 88130b5..aaffd0c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -453,14 +453,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
refcount_set(&dd->count, 1);
list_add(&dd->list, &t->devices);
+ goto out;
} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
r = upgrade_mode(dd, mode, t->md);
if (r)
return r;
- refcount_inc(&dd->count);
}
-
+ refcount_inc(&dd->count);
+out:
*result = dd->dm_dev;
return 0;
}
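With this reordering, the first dm_get_device() caller for a device leaves dd->count at 1 via refcount_set() and jumps past the increment, while every subsequent caller, whether or not it upgrades the mode, passes through the shared refcount_inc(); each successful call is therefore balanced by exactly one dm_put_device().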
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index d31d18d..36ef284 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -80,10 +80,14 @@
#define SECTOR_TO_BLOCK_SHIFT 3
/*
+ * For btree insert:
* 3 for btree insert +
* 2 for btree lookup used within space map
+ * For btree remove:
+ * 2 for shadow spine +
+ * 4 for rebalancing 3 child nodes
*/
-#define THIN_MAX_CONCURRENT_LOCKS 5
+#define THIN_MAX_CONCURRENT_LOCKS 6
/* This should be plenty */
#define SPACE_MAP_ROOT_SIZE 128
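Worked out: a btree insert holds at most 3 + 2 = 5 concurrent block locks, while a remove holds 2 + 4 = 6, so the constant must cover the larger of the two paths; the previous value of 5 could run out of the lock budget during a remove.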
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 89e5dff..f91d771 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -4355,30 +4355,28 @@ static struct target_type thin_target = {
static int __init dm_thin_init(void)
{
- int r;
+ int r = -ENOMEM;
pool_table_init();
+ _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
+ if (!_new_mapping_cache)
+ return r;
+
r = dm_register_target(&thin_target);
if (r)
- return r;
+ goto bad_new_mapping_cache;
r = dm_register_target(&pool_target);
if (r)
- goto bad_pool_target;
-
- r = -ENOMEM;
-
- _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
- if (!_new_mapping_cache)
- goto bad_new_mapping_cache;
+ goto bad_thin_target;
return 0;
-bad_new_mapping_cache:
- dm_unregister_target(&pool_target);
-bad_pool_target:
+bad_thin_target:
dm_unregister_target(&thin_target);
+bad_new_mapping_cache:
+ kmem_cache_destroy(_new_mapping_cache);
return r;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index de17b71..8c26bfc 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -920,7 +920,15 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
return -EINVAL;
}
- ti->max_io_len = (uint32_t) len;
+ /*
+ * A bio-based queue does its own splitting. When multipage
+ * bvecs are enabled, the size of an incoming bio may be too
+ * big to be handled by some targets, such as crypt.
+ *
+ * When these targets are ready for the big bio, we can remove
+ * the limit.
+ */
+ ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
return 0;
}
@@ -1753,7 +1761,7 @@ static struct mapped_device *alloc_dev(int minor)
goto bad;
md->dax_dev = dax_dev;
- add_disk(md->disk);
+ add_disk_no_queue_reg(md->disk);
format_dev_t(md->name, MKDEV(_major, minor));
md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
@@ -2013,6 +2021,7 @@ EXPORT_SYMBOL_GPL(dm_get_queue_limits);
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
int r;
+ struct queue_limits limits;
enum dm_queue_mode type = dm_get_md_type(md);
switch (type) {
@@ -2049,6 +2058,14 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
break;
}
+ r = dm_calculate_queue_limits(t, &limits);
+ if (r) {
+ DMERR("Cannot calculate initial queue limits");
+ return r;
+ }
+ dm_table_set_restrictions(t, md->queue, &limits);
+ blk_register_queue(md->disk);
+
return 0;
}
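The clamp keeps a single bio from exceeding what a target can process. A standalone sketch of the min_t() logic with assumed constants (the BIO_MAX_PAGES and PAGE_SIZE values below are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Assumed illustrative values; the kernel derives these elsewhere. */
#define BIO_MAX_PAGES	256
#define PAGE_SIZE	4096u

/* Sketch of the min_t() clamp applied to ti->max_io_len. */
static uint32_t clamp_max_io_len(uint64_t len)
{
	uint32_t cap = BIO_MAX_PAGES * PAGE_SIZE;

	return len < cap ? (uint32_t)len : cap;
}

int main(void)
{
	printf("%u\n", clamp_max_io_len(1u << 24));	/* clamped to 1048576 */
	return 0;
}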
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index f21ce6a..58b3197 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -683,23 +683,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
pn->keys[1] = rn->keys[0];
memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
- /*
- * rejig the spine. This is ugly, since it knows too
- * much about the spine
- */
- if (s->nodes[0] != new_parent) {
- unlock_block(s->info, s->nodes[0]);
- s->nodes[0] = new_parent;
- }
- if (key < le64_to_cpu(rn->keys[0])) {
- unlock_block(s->info, right);
- s->nodes[1] = left;
- } else {
- unlock_block(s->info, left);
- s->nodes[1] = right;
- }
- s->count = 2;
-
+ unlock_block(s->info, left);
+ unlock_block(s->info, right);
return 0;
}
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index a385a35..90a66b3 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -32,7 +32,6 @@
#include <linux/pm_runtime.h>
#include <linux/platform_data/mtd-nand-omap2.h>
-#include <linux/platform_data/mtd-onenand-omap2.h>
#include <asm/mach-types.h>
@@ -1138,6 +1137,112 @@ struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs)
}
EXPORT_SYMBOL_GPL(gpmc_omap_get_nand_ops);
+static void gpmc_omap_onenand_calc_sync_timings(struct gpmc_timings *t,
+ struct gpmc_settings *s,
+ int freq, int latency)
+{
+ struct gpmc_device_timings dev_t;
+ const int t_cer = 15;
+ const int t_avdp = 12;
+ const int t_cez = 20; /* max of t_cez, t_oez */
+ const int t_wpl = 40;
+ const int t_wph = 30;
+ int min_gpmc_clk_period, t_ces, t_avds, t_avdh, t_ach, t_aavdh, t_rdyo;
+
+ switch (freq) {
+ case 104:
+ min_gpmc_clk_period = 9600; /* 104 MHz */
+ t_ces = 3;
+ t_avds = 4;
+ t_avdh = 2;
+ t_ach = 3;
+ t_aavdh = 6;
+ t_rdyo = 6;
+ break;
+ case 83:
+ min_gpmc_clk_period = 12000; /* 83 MHz */
+ t_ces = 5;
+ t_avds = 4;
+ t_avdh = 2;
+ t_ach = 6;
+ t_aavdh = 6;
+ t_rdyo = 9;
+ break;
+ case 66:
+ min_gpmc_clk_period = 15000; /* 66 MHz */
+ t_ces = 6;
+ t_avds = 5;
+ t_avdh = 2;
+ t_ach = 6;
+ t_aavdh = 6;
+ t_rdyo = 11;
+ break;
+ default:
+ min_gpmc_clk_period = 18500; /* 54 MHz */
+ t_ces = 7;
+ t_avds = 7;
+ t_avdh = 7;
+ t_ach = 9;
+ t_aavdh = 7;
+ t_rdyo = 15;
+ break;
+ }
+
+ /* Set synchronous read timings */
+ memset(&dev_t, 0, sizeof(dev_t));
+
+ if (!s->sync_write) {
+ dev_t.t_avdp_w = max(t_avdp, t_cer) * 1000;
+ dev_t.t_wpl = t_wpl * 1000;
+ dev_t.t_wph = t_wph * 1000;
+ dev_t.t_aavdh = t_aavdh * 1000;
+ }
+ dev_t.ce_xdelay = true;
+ dev_t.avd_xdelay = true;
+ dev_t.oe_xdelay = true;
+ dev_t.we_xdelay = true;
+ dev_t.clk = min_gpmc_clk_period;
+ dev_t.t_bacc = dev_t.clk;
+ dev_t.t_ces = t_ces * 1000;
+ dev_t.t_avds = t_avds * 1000;
+ dev_t.t_avdh = t_avdh * 1000;
+ dev_t.t_ach = t_ach * 1000;
+ dev_t.cyc_iaa = (latency + 1);
+ dev_t.t_cez_r = t_cez * 1000;
+ dev_t.t_cez_w = dev_t.t_cez_r;
+ dev_t.cyc_aavdh_oe = 1;
+ dev_t.t_rdyo = t_rdyo * 1000 + min_gpmc_clk_period;
+
+ gpmc_calc_timings(t, s, &dev_t);
+}
+
+int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
+ int latency,
+ struct gpmc_onenand_info *info)
+{
+ int ret;
+ struct gpmc_timings gpmc_t;
+ struct gpmc_settings gpmc_s;
+
+ gpmc_read_settings_dt(dev->of_node, &gpmc_s);
+
+ info->sync_read = gpmc_s.sync_read;
+ info->sync_write = gpmc_s.sync_write;
+ info->burst_len = gpmc_s.burst_len;
+
+ if (!gpmc_s.sync_read && !gpmc_s.sync_write)
+ return 0;
+
+ gpmc_omap_onenand_calc_sync_timings(&gpmc_t, &gpmc_s, freq, latency);
+
+ ret = gpmc_cs_program_settings(cs, &gpmc_s);
+ if (ret < 0)
+ return ret;
+
+ return gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s);
+}
+EXPORT_SYMBOL_GPL(gpmc_omap_onenand_set_timings);
+
int gpmc_get_client_irq(unsigned irq_config)
{
if (!gpmc_irq_domain) {
@@ -1916,41 +2021,6 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
of_property_read_bool(np, "gpmc,time-para-granularity");
}
-#if IS_ENABLED(CONFIG_MTD_ONENAND)
-static int gpmc_probe_onenand_child(struct platform_device *pdev,
- struct device_node *child)
-{
- u32 val;
- struct omap_onenand_platform_data *gpmc_onenand_data;
-
- if (of_property_read_u32(child, "reg", &val) < 0) {
- dev_err(&pdev->dev, "%pOF has no 'reg' property\n",
- child);
- return -ENODEV;
- }
-
- gpmc_onenand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_onenand_data),
- GFP_KERNEL);
- if (!gpmc_onenand_data)
- return -ENOMEM;
-
- gpmc_onenand_data->cs = val;
- gpmc_onenand_data->of_node = child;
- gpmc_onenand_data->dma_channel = -1;
-
- if (!of_property_read_u32(child, "dma-channel", &val))
- gpmc_onenand_data->dma_channel = val;
-
- return gpmc_onenand_init(gpmc_onenand_data);
-}
-#else
-static int gpmc_probe_onenand_child(struct platform_device *pdev,
- struct device_node *child)
-{
- return 0;
-}
-#endif
-
/**
* gpmc_probe_generic_child - configures the gpmc for a child device
* @pdev: pointer to gpmc platform device
@@ -2053,6 +2123,16 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
}
}
+ if (of_node_cmp(child->name, "onenand") == 0) {
+ /* Warn about older DT blobs with no compatible property */
+ if (!of_property_read_bool(child, "compatible")) {
+ dev_warn(&pdev->dev,
+ "Incompatible OneNAND node: missing compatible");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
if (of_device_is_compatible(child, "ti,omap2-nand")) {
/* NAND specific setup */
val = 8;
@@ -2077,8 +2157,9 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
} else {
ret = of_property_read_u32(child, "bank-width",
&gpmc_s.device_width);
- if (ret < 0) {
- dev_err(&pdev->dev, "%pOF has no 'bank-width' property\n",
+ if (ret < 0 && !gpmc_s.device_width) {
+ dev_err(&pdev->dev,
+ "%pOF has no 'gpmc,device-width' property\n",
child);
goto err;
}
@@ -2188,11 +2269,7 @@ static void gpmc_probe_dt_children(struct platform_device *pdev)
if (!child->name)
continue;
- if (of_node_cmp(child->name, "onenand") == 0)
- ret = gpmc_probe_onenand_child(pdev, child);
- else
- ret = gpmc_probe_generic_child(pdev, child);
-
+ ret = gpmc_probe_generic_child(pdev, child);
if (ret) {
dev_err(&pdev->dev, "failed to probe DT child '%s': %d\n",
child->name, ret);
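The new gpmc_omap_onenand_set_timings() export moves timing programming out of board files: the OneNAND driver probes the chip, then hands the GPMC its clock rate and latency. A hedged sketch of a caller (the 83 MHz frequency and 4-cycle latency are made-up values, and onenand_setup_sync() is hypothetical):

/*
 * Hypothetical caller: a OneNAND driver asking the GPMC to reprogram
 * chip-select timings after switching the chip to synchronous mode.
 */
static int onenand_setup_sync(struct device *dev, int cs)
{
	struct gpmc_onenand_info info;
	int ret;

	ret = gpmc_omap_onenand_set_timings(dev, cs, 83, 4, &info);
	if (ret)
		return ret;

	if (!info.sync_read && !info.sync_write)
		dev_info(dev, "GPMC left in async mode per DT settings\n");

	return 0;
}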
diff --git a/drivers/memstick/host/Kconfig b/drivers/memstick/host/Kconfig
index 7310e32..aa2b078 100644
--- a/drivers/memstick/host/Kconfig
+++ b/drivers/memstick/host/Kconfig
@@ -45,7 +45,7 @@ config MEMSTICK_R592
config MEMSTICK_REALTEK_PCI
tristate "Realtek PCI-E Memstick Card Interface Driver"
- depends on MFD_RTSX_PCI
+ depends on MISC_RTSX_PCI
help
Say Y here to include driver code to support Memstick card interface
of Realtek PCI-E card reader
@@ -55,7 +55,7 @@ config MEMSTICK_REALTEK_PCI
config MEMSTICK_REALTEK_USB
tristate "Realtek USB Memstick Card Interface Driver"
- depends on MFD_RTSX_USB
+ depends on MISC_RTSX_USB
help
Say Y here to include driver code to support Memstick card interface
of Realtek RTS5129/39 series USB card reader
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
index 818fa94..a44b457 100644
--- a/drivers/memstick/host/rtsx_pci_ms.c
+++ b/drivers/memstick/host/rtsx_pci_ms.c
@@ -24,7 +24,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/memstick.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include <asm/unaligned.h>
struct realtek_pci_ms {
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index 2e3cf01..4f64563 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -25,7 +25,7 @@
#include <linux/workqueue.h>
#include <linux/memstick.h>
#include <linux/kthread.h>
-#include <linux/mfd/rtsx_usb.h>
+#include <linux/rtsx_usb.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/sched.h>
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 1d20a80..b860eb5 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -222,6 +222,16 @@ config MFD_CROS_EC_SPI
response time cannot be guaranteed, we support ignoring
'pre-amble' bytes before the response actually starts.
+config MFD_CROS_EC_CHARDEV
+ tristate "Chrome OS Embedded Controller userspace device interface"
+ depends on MFD_CROS_EC
+ select CROS_EC_CTL
+ ---help---
+ This driver adds support to talk with the ChromeOS EC from userspace.
+
+ If you have a supported Chromebook, choose Y or M here.
+ The module will be called cros_ec_dev.
+
config MFD_ASIC3
bool "Compaq ASIC3"
depends on GPIOLIB && ARM
@@ -877,7 +887,7 @@ config UCB1400_CORE
config MFD_PM8XXX
tristate "Qualcomm PM8xxx PMIC chips driver"
- depends on (ARM || HEXAGON)
+ depends on (ARM || HEXAGON || COMPILE_TEST)
select IRQ_DOMAIN
select MFD_CORE
select REGMAP
@@ -929,17 +939,6 @@ config MFD_RDC321X
southbridge which provides access to GPIOs and Watchdog using the
southbridge PCI device configuration space.
-config MFD_RTSX_PCI
- tristate "Realtek PCI-E card reader"
- depends on PCI
- select MFD_CORE
- help
- This supports for Realtek PCI-Express card reader including rts5209,
- rts5227, rts522A, rts5229, rts5249, rts524A, rts525A, rtl8411, etc.
- Realtek card reader supports access to many types of memory cards,
- such as Memory Stick, Memory Stick Pro, Secure Digital and
- MultiMediaCard.
-
config MFD_RT5033
tristate "Richtek RT5033 Power Management IC"
depends on I2C
@@ -953,16 +952,6 @@ config MFD_RT5033
sub-devices like charger, fuel gauge, flash LED, current source,
LDO and Buck.
-config MFD_RTSX_USB
- tristate "Realtek USB card reader"
- depends on USB
- select MFD_CORE
- help
- Select this option to get support for Realtek USB 2.0 card readers
- including RTS5129, RTS5139, RTS5179 and RTS5170.
- Realtek card reader supports access to many types of memory cards,
- such as Memory Stick Pro, Secure Digital and MultiMediaCard.
-
config MFD_RC5T583
bool "Ricoh RC5T583 Power Management system device"
depends on I2C=y
@@ -1859,5 +1848,13 @@ config MFD_VEXPRESS_SYSREG
System Registers are the platform configuration block
on the ARM Ltd. Versatile Express board.
+config RAVE_SP_CORE
+ tristate "RAVE SP MCU core driver"
+ depends on SERIAL_DEV_BUS
+ select CRC_CCITT
+ help
+ Select this to get support for the Supervisory Processor
+ device found on several products in the RAVE line of hardware.
+
endmenu
endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index d9474ad..d9d2cf0 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -17,12 +17,9 @@ cros_ec_core-$(CONFIG_ACPI) += cros_ec_acpi_gpe.o
obj-$(CONFIG_MFD_CROS_EC) += cros_ec_core.o
obj-$(CONFIG_MFD_CROS_EC_I2C) += cros_ec_i2c.o
obj-$(CONFIG_MFD_CROS_EC_SPI) += cros_ec_spi.o
+obj-$(CONFIG_MFD_CROS_EC_CHARDEV) += cros_ec_dev.o
obj-$(CONFIG_MFD_EXYNOS_LPASS) += exynos-lpass.o
-rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o
-obj-$(CONFIG_MFD_RTSX_PCI) += rtsx_pci.o
-obj-$(CONFIG_MFD_RTSX_USB) += rtsx_usb.o
-
obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
@@ -230,3 +227,5 @@ obj-$(CONFIG_MFD_STM32_LPTIMER) += stm32-lptimer.o
obj-$(CONFIG_MFD_STM32_TIMERS) += stm32-timers.o
obj-$(CONFIG_MFD_MXS_LRADC) += mxs-lradc.o
obj-$(CONFIG_MFD_SC27XX_PMIC) += sprd-sc27xx-spi.o
+obj-$(CONFIG_RAVE_SP_CORE) += rave-sp.o
+
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index c1c8152..1afa27d 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -1258,6 +1258,19 @@ static struct ab8500_prcmu_ranges ab8540_debug_ranges[AB8500_NUM_BANKS] = {
},
};
+#define DEFINE_SHOW_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
static irqreturn_t ab8500_debug_handler(int irq, void *data)
{
@@ -1318,7 +1331,7 @@ static int ab8500_registers_print(struct device *dev, u32 bank,
return 0;
}
-static int ab8500_print_bank_registers(struct seq_file *s, void *p)
+static int ab8500_bank_registers_show(struct seq_file *s, void *p)
{
struct device *dev = s->private;
u32 bank = debug_bank;
@@ -1330,18 +1343,7 @@ static int ab8500_print_bank_registers(struct seq_file *s, void *p)
return ab8500_registers_print(dev, bank, s);
}
-static int ab8500_registers_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_print_bank_registers, inode->i_private);
-}
-
-static const struct file_operations ab8500_registers_fops = {
- .open = ab8500_registers_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_bank_registers);
static int ab8500_print_all_banks(struct seq_file *s, void *p)
{
@@ -1528,7 +1530,7 @@ void ab8500_debug_register_interrupt(int line)
num_interrupts[line]++;
}
-static int ab8500_interrupts_print(struct seq_file *s, void *p)
+static int ab8500_interrupts_show(struct seq_file *s, void *p)
{
int line;
@@ -1557,10 +1559,7 @@ static int ab8500_interrupts_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_interrupts_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_interrupts_print, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_interrupts);
/*
* - HWREG DB8500 formated routines
@@ -1603,7 +1602,7 @@ static int ab8500_hwreg_open(struct inode *inode, struct file *file)
#define AB8500_LAST_SIM_REG 0x8B
#define AB8505_LAST_SIM_REG 0x8C
-static int ab8500_print_modem_registers(struct seq_file *s, void *p)
+static int ab8500_modem_show(struct seq_file *s, void *p)
{
struct device *dev = s->private;
struct ab8500 *ab8500;
@@ -1620,18 +1619,15 @@ static int ab8500_print_modem_registers(struct seq_file *s, void *p)
err = abx500_get_register_interruptible(dev,
AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG, &orig_value);
- if (err < 0) {
- dev_err(dev, "ab->read fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_read_failure;
+
/* Config 1 will allow APE side to read SIM registers */
err = abx500_set_register_interruptible(dev,
AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG,
AB8500_SUPPLY_CONTROL_CONFIG_1);
- if (err < 0) {
- dev_err(dev, "ab->write fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_write_failure;
seq_printf(s, " bank 0x%02X:\n", bank);
@@ -1641,36 +1637,30 @@ static int ab8500_print_modem_registers(struct seq_file *s, void *p)
for (reg = AB8500_FIRST_SIM_REG; reg <= last_sim_reg; reg++) {
err = abx500_get_register_interruptible(dev,
bank, reg, &value);
- if (err < 0) {
- dev_err(dev, "ab->read fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_read_failure;
+
seq_printf(s, " [0x%02X/0x%02X]: 0x%02X\n", bank, reg, value);
}
err = abx500_set_register_interruptible(dev,
AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG, orig_value);
- if (err < 0) {
- dev_err(dev, "ab->write fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_write_failure;
+
return 0;
-}
-static int ab8500_modem_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_print_modem_registers,
- inode->i_private);
+report_read_failure:
+ dev_err(dev, "ab->read fail %d\n", err);
+ return err;
+
+report_write_failure:
+ dev_err(dev, "ab->write fail %d\n", err);
+ return err;
}
-static const struct file_operations ab8500_modem_fops = {
- .open = ab8500_modem_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_modem);
-static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_bat_ctrl_show(struct seq_file *s, void *p)
{
int bat_ctrl_raw;
int bat_ctrl_convert;
@@ -1687,21 +1677,9 @@ static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_bat_ctrl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_bat_ctrl_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_bat_ctrl_fops = {
- .open = ab8500_gpadc_bat_ctrl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_bat_ctrl);
-static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_btemp_ball_show(struct seq_file *s, void *p)
{
int btemp_ball_raw;
int btemp_ball_convert;
@@ -1718,22 +1696,9 @@ static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_btemp_ball_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_btemp_ball_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_btemp_ball);
-static const struct file_operations ab8500_gpadc_btemp_ball_fops = {
- .open = ab8500_gpadc_btemp_ball_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_main_charger_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_main_charger_v_show(struct seq_file *s, void *p)
{
int main_charger_v_raw;
int main_charger_v_convert;
@@ -1750,22 +1715,9 @@ static int ab8500_gpadc_main_charger_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_main_charger_v_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_main_charger_v_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_main_charger_v_fops = {
- .open = ab8500_gpadc_main_charger_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_charger_v);
-static int ab8500_gpadc_acc_detect1_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_acc_detect1_show(struct seq_file *s, void *p)
{
int acc_detect1_raw;
int acc_detect1_convert;
@@ -1782,22 +1734,9 @@ static int ab8500_gpadc_acc_detect1_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_acc_detect1_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_acc_detect1_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_acc_detect1_fops = {
- .open = ab8500_gpadc_acc_detect1_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_acc_detect1);
-static int ab8500_gpadc_acc_detect2_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_acc_detect2_show(struct seq_file *s, void *p)
{
int acc_detect2_raw;
int acc_detect2_convert;
@@ -1814,22 +1753,9 @@ static int ab8500_gpadc_acc_detect2_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_acc_detect2_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_acc_detect2_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_acc_detect2_fops = {
- .open = ab8500_gpadc_acc_detect2_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_acc_detect2);
-static int ab8500_gpadc_aux1_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_aux1_show(struct seq_file *s, void *p)
{
int aux1_raw;
int aux1_convert;
@@ -1846,20 +1772,9 @@ static int ab8500_gpadc_aux1_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_aux1_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_aux1_print, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_aux1);
-static const struct file_operations ab8500_gpadc_aux1_fops = {
- .open = ab8500_gpadc_aux1_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_aux2_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_aux2_show(struct seq_file *s, void *p)
{
int aux2_raw;
int aux2_convert;
@@ -1876,20 +1791,9 @@ static int ab8500_gpadc_aux2_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_aux2_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_aux2_print, inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_aux2_fops = {
- .open = ab8500_gpadc_aux2_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_aux2);
-static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_main_bat_v_show(struct seq_file *s, void *p)
{
int main_bat_v_raw;
int main_bat_v_convert;
@@ -1906,22 +1810,9 @@ static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_main_bat_v_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_main_bat_v_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_bat_v);
-static const struct file_operations ab8500_gpadc_main_bat_v_fops = {
- .open = ab8500_gpadc_main_bat_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_vbus_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_vbus_v_show(struct seq_file *s, void *p)
{
int vbus_v_raw;
int vbus_v_convert;
@@ -1938,20 +1829,9 @@ static int ab8500_gpadc_vbus_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_vbus_v_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_vbus_v_print, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_vbus_v);
-static const struct file_operations ab8500_gpadc_vbus_v_fops = {
- .open = ab8500_gpadc_vbus_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_main_charger_c_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_main_charger_c_show(struct seq_file *s, void *p)
{
int main_charger_c_raw;
int main_charger_c_convert;
@@ -1968,22 +1848,9 @@ static int ab8500_gpadc_main_charger_c_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_main_charger_c_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_main_charger_c_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_main_charger_c_fops = {
- .open = ab8500_gpadc_main_charger_c_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_charger_c);
-static int ab8500_gpadc_usb_charger_c_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_usb_charger_c_show(struct seq_file *s, void *p)
{
int usb_charger_c_raw;
int usb_charger_c_convert;
@@ -2000,22 +1867,9 @@ static int ab8500_gpadc_usb_charger_c_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_usb_charger_c_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_usb_charger_c_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_usb_charger_c_fops = {
- .open = ab8500_gpadc_usb_charger_c_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_usb_charger_c);
-static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_bk_bat_v_show(struct seq_file *s, void *p)
{
int bk_bat_v_raw;
int bk_bat_v_convert;
@@ -2032,21 +1886,9 @@ static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_bk_bat_v_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_bk_bat_v_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_bk_bat_v_fops = {
- .open = ab8500_gpadc_bk_bat_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_bk_bat_v);
-static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_die_temp_show(struct seq_file *s, void *p)
{
int die_temp_raw;
int die_temp_convert;
@@ -2063,21 +1905,9 @@ static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_die_temp_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_die_temp_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_die_temp);
-static const struct file_operations ab8500_gpadc_die_temp_fops = {
- .open = ab8500_gpadc_die_temp_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_usb_id_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_usb_id_show(struct seq_file *s, void *p)
{
int usb_id_raw;
int usb_id_convert;
@@ -2094,20 +1924,9 @@ static int ab8500_gpadc_usb_id_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_usb_id_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_usb_id_print, inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_usb_id_fops = {
- .open = ab8500_gpadc_usb_id_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_usb_id);
-static int ab8540_gpadc_xtal_temp_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_xtal_temp_show(struct seq_file *s, void *p)
{
int xtal_temp_raw;
int xtal_temp_convert;
@@ -2124,21 +1943,9 @@ static int ab8540_gpadc_xtal_temp_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_xtal_temp_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8540_gpadc_xtal_temp_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_xtal_temp);
-static const struct file_operations ab8540_gpadc_xtal_temp_fops = {
- .open = ab8540_gpadc_xtal_temp_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8540_gpadc_vbat_true_meas_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_vbat_true_meas_show(struct seq_file *s, void *p)
{
int vbat_true_meas_raw;
int vbat_true_meas_convert;
@@ -2156,22 +1963,9 @@ static int ab8540_gpadc_vbat_true_meas_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_vbat_true_meas_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_vbat_true_meas_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_true_meas);
-static const struct file_operations ab8540_gpadc_vbat_true_meas_fops = {
- .open = ab8540_gpadc_vbat_true_meas_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8540_gpadc_bat_ctrl_and_ibat_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_bat_ctrl_and_ibat_show(struct seq_file *s, void *p)
{
int bat_ctrl_raw;
int bat_ctrl_convert;
@@ -2197,22 +1991,9 @@ static int ab8540_gpadc_bat_ctrl_and_ibat_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_bat_ctrl_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_bat_ctrl_and_ibat_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8540_gpadc_bat_ctrl_and_ibat_fops = {
- .open = ab8540_gpadc_bat_ctrl_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_bat_ctrl_and_ibat);
-static int ab8540_gpadc_vbat_meas_and_ibat_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_vbat_meas_and_ibat_show(struct seq_file *s, void *p)
{
int vbat_meas_raw;
int vbat_meas_convert;
@@ -2237,23 +2018,9 @@ static int ab8540_gpadc_vbat_meas_and_ibat_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_vbat_meas_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_vbat_meas_and_ibat_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_meas_and_ibat);
-static const struct file_operations ab8540_gpadc_vbat_meas_and_ibat_fops = {
- .open = ab8540_gpadc_vbat_meas_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8540_gpadc_vbat_true_meas_and_ibat_print(struct seq_file *s,
- void *p)
+static int ab8540_gpadc_vbat_true_meas_and_ibat_show(struct seq_file *s, void *p)
{
int vbat_true_meas_raw;
int vbat_true_meas_convert;
@@ -2279,23 +2046,9 @@ static int ab8540_gpadc_vbat_true_meas_and_ibat_print(struct seq_file *s,
return 0;
}
-static int ab8540_gpadc_vbat_true_meas_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_vbat_true_meas_and_ibat_print,
- inode->i_private);
-}
-
-static const struct file_operations
-ab8540_gpadc_vbat_true_meas_and_ibat_fops = {
- .open = ab8540_gpadc_vbat_true_meas_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_true_meas_and_ibat);
-static int ab8540_gpadc_bat_temp_and_ibat_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_bat_temp_and_ibat_show(struct seq_file *s, void *p)
{
int bat_temp_raw;
int bat_temp_convert;
@@ -2320,22 +2073,9 @@ static int ab8540_gpadc_bat_temp_and_ibat_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_bat_temp_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_bat_temp_and_ibat_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8540_gpadc_bat_temp_and_ibat_fops = {
- .open = ab8540_gpadc_bat_temp_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_bat_temp_and_ibat);
-static int ab8540_gpadc_otp_cal_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_otp_calib_show(struct seq_file *s, void *p)
{
struct ab8500_gpadc *gpadc;
u16 vmain_l, vmain_h, btemp_l, btemp_h;
@@ -2359,18 +2099,7 @@ static int ab8540_gpadc_otp_cal_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_otp_cal_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8540_gpadc_otp_cal_print, inode->i_private);
-}
-
-static const struct file_operations ab8540_gpadc_otp_calib_fops = {
- .open = ab8540_gpadc_otp_cal_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_otp_calib);
static int ab8500_gpadc_avg_sample_print(struct seq_file *s, void *p)
{
@@ -2903,14 +2632,6 @@ static const struct file_operations ab8500_val_fops = {
.owner = THIS_MODULE,
};
-static const struct file_operations ab8500_interrupts_fops = {
- .open = ab8500_interrupts_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
static const struct file_operations ab8500_subscribe_fops = {
.open = ab8500_subscribe_unsubscribe_open,
.write = ab8500_subscribe_write,
@@ -2997,7 +2718,7 @@ static int ab8500_debug_probe(struct platform_device *plf)
goto err;
file = debugfs_create_file("all-bank-registers", S_IRUGO, ab8500_dir,
- &plf->dev, &ab8500_registers_fops);
+ &plf->dev, &ab8500_bank_registers_fops);
if (!file)
goto err;
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index 09cf369..a307832 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -184,6 +184,7 @@ static struct irq_chip arizona_irq_chip = {
};
static struct lock_class_key arizona_irq_lock_class;
+static struct lock_class_key arizona_irq_request_class;
static int arizona_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
@@ -191,7 +192,8 @@ static int arizona_irq_map(struct irq_domain *h, unsigned int virq,
struct arizona *data = h->host_data;
irq_set_chip_data(virq, data);
- irq_set_lockdep_class(virq, &arizona_irq_lock_class);
+ irq_set_lockdep_class(virq, &arizona_irq_lock_class,
+ &arizona_irq_request_class);
irq_set_chip_and_handler(virq, &arizona_irq_chip, handle_simple_irq);
irq_set_nested_thread(virq, 1);
irq_set_noprobe(virq);
diff --git a/drivers/mfd/atmel-flexcom.c b/drivers/mfd/atmel-flexcom.c
index 064bde9..f684a93 100644
--- a/drivers/mfd/atmel-flexcom.c
+++ b/drivers/mfd/atmel-flexcom.c
@@ -39,34 +39,43 @@
#define FLEX_MR_OPMODE(opmode) (((opmode) << FLEX_MR_OPMODE_OFFSET) & \
FLEX_MR_OPMODE_MASK)
+struct atmel_flexcom {
+ void __iomem *base;
+ u32 opmode;
+ struct clk *clk;
+};
static int atmel_flexcom_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct clk *clk;
struct resource *res;
- void __iomem *base;
- u32 opmode;
+ struct atmel_flexcom *ddata;
int err;
- err = of_property_read_u32(np, "atmel,flexcom-mode", &opmode);
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ddata);
+
+ err = of_property_read_u32(np, "atmel,flexcom-mode", &ddata->opmode);
if (err)
return err;
- if (opmode < ATMEL_FLEXCOM_MODE_USART ||
- opmode > ATMEL_FLEXCOM_MODE_TWI)
+ if (ddata->opmode < ATMEL_FLEXCOM_MODE_USART ||
+ ddata->opmode > ATMEL_FLEXCOM_MODE_TWI)
return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ ddata->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ddata->base))
+ return PTR_ERR(ddata->base);
- clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ ddata->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ddata->clk))
+ return PTR_ERR(ddata->clk);
- err = clk_prepare_enable(clk);
+ err = clk_prepare_enable(ddata->clk);
if (err)
return err;
@@ -76,9 +85,9 @@ static int atmel_flexcom_probe(struct platform_device *pdev)
* inaccessible and are read as zero. Also the external I/O lines of the
* Flexcom are muxed to reach the selected device.
*/
- writel(FLEX_MR_OPMODE(opmode), base + FLEX_MR);
+ writel(FLEX_MR_OPMODE(ddata->opmode), ddata->base + FLEX_MR);
- clk_disable_unprepare(clk);
+ clk_disable_unprepare(ddata->clk);
return devm_of_platform_populate(&pdev->dev);
}
@@ -89,10 +98,34 @@ static const struct of_device_id atmel_flexcom_of_match[] = {
};
MODULE_DEVICE_TABLE(of, atmel_flexcom_of_match);
+#ifdef CONFIG_PM_SLEEP
+static int atmel_flexcom_resume(struct device *dev)
+{
+ struct atmel_flexcom *ddata = dev_get_drvdata(dev);
+ int err;
+ u32 val;
+
+ err = clk_prepare_enable(ddata->clk);
+ if (err)
+ return err;
+
+ val = FLEX_MR_OPMODE(ddata->opmode);
+ writel(val, ddata->base + FLEX_MR);
+
+ clk_disable_unprepare(ddata->clk);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(atmel_flexcom_pm_ops, NULL,
+ atmel_flexcom_resume);
+
static struct platform_driver atmel_flexcom_driver = {
.probe = atmel_flexcom_probe,
.driver = {
.name = "atmel_flexcom",
+ .pm = &atmel_flexcom_pm_ops,
.of_match_table = atmel_flexcom_of_match,
},
};
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 2468b43..e94c72c 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -129,6 +129,7 @@ static const struct regmap_range axp288_volatile_ranges[] = {
regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON),
regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL),
regmap_reg_range(AXP288_BC_DET_STAT, AXP288_BC_DET_STAT),
+ regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL),
regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L),
regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL),
regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
@@ -878,6 +879,9 @@ static struct mfd_cell axp813_cells[] = {
.resources = axp803_pek_resources,
}, {
.name = "axp20x-regulator",
+ }, {
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp813-gpio",
}
};
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
index b0ca5a4c..d610241 100644
--- a/drivers/mfd/cros_ec.c
+++ b/drivers/mfd/cros_ec.c
@@ -40,13 +40,13 @@ static struct cros_ec_platform pd_p = {
};
static const struct mfd_cell ec_cell = {
- .name = "cros-ec-ctl",
+ .name = "cros-ec-dev",
.platform_data = &ec_p,
.pdata_size = sizeof(ec_p),
};
static const struct mfd_cell ec_pd_cell = {
- .name = "cros-ec-ctl",
+ .name = "cros-ec-dev",
.platform_data = &pd_p,
.pdata_size = sizeof(pd_p),
};
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index cf6c4f0..e4fafdd 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -25,9 +25,10 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include "cros_ec_debugfs.h"
#include "cros_ec_dev.h"
+#define DRV_NAME "cros-ec-dev"
+
/* Device variables */
#define CROS_MAX_DEV 128
static int ec_major;
@@ -461,7 +462,7 @@ static int ec_device_remove(struct platform_device *pdev)
}
static const struct platform_device_id cros_ec_id[] = {
- { "cros-ec-ctl", 0 },
+ { DRV_NAME, 0 },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, cros_ec_id);
@@ -493,7 +494,7 @@ static const struct dev_pm_ops cros_ec_dev_pm_ops = {
static struct platform_driver cros_ec_dev_driver = {
.driver = {
- .name = "cros-ec-ctl",
+ .name = DRV_NAME,
.pm = &cros_ec_dev_pm_ops,
},
.probe = ec_device_probe,
@@ -544,6 +545,7 @@ static void __exit cros_ec_dev_exit(void)
module_init(cros_ec_dev_init);
module_exit(cros_ec_dev_exit);
+MODULE_ALIAS("platform:" DRV_NAME);
MODULE_AUTHOR("Bill Richardson <wfrichar@chromium.org>");
MODULE_DESCRIPTION("Userspace interface to the Chrome OS Embedded Controller");
MODULE_VERSION("1.0");
diff --git a/drivers/platform/chrome/cros_ec_dev.h b/drivers/mfd/cros_ec_dev.h
index 45e9453..45e9453 100644
--- a/drivers/platform/chrome/cros_ec_dev.h
+++ b/drivers/mfd/cros_ec_dev.h
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index c971407..1b52b85 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -72,8 +72,7 @@
* struct cros_ec_spi - information about a SPI-connected EC
*
* @spi: SPI device we are connected to
- * @last_transfer_ns: time that we last finished a transfer, or 0 if there
- * if no record
+ * @last_transfer_ns: time that we last finished a transfer.
* @start_of_msg_delay: used to set the delay_usecs on the spi_transfer that
* is sent when we want to turn on CS at the start of a transaction.
* @end_of_msg_delay: used to set the delay_usecs on the spi_transfer that
@@ -377,19 +376,17 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
u8 *ptr;
u8 *rx_buf;
u8 sum;
+ u8 rx_byte;
int ret = 0, final_ret;
+ unsigned long delay;
len = cros_ec_prepare_tx(ec_dev, ec_msg);
dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
/* If it's too soon to do another transaction, wait */
- if (ec_spi->last_transfer_ns) {
- unsigned long delay; /* The delay completed so far */
-
- delay = ktime_get_ns() - ec_spi->last_transfer_ns;
- if (delay < EC_SPI_RECOVERY_TIME_NS)
- ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
- }
+ delay = ktime_get_ns() - ec_spi->last_transfer_ns;
+ if (delay < EC_SPI_RECOVERY_TIME_NS)
+ ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
rx_buf = kzalloc(len, GFP_KERNEL);
if (!rx_buf)
@@ -421,25 +418,22 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
if (!ret) {
/* Verify that EC can process command */
for (i = 0; i < len; i++) {
- switch (rx_buf[i]) {
- case EC_SPI_PAST_END:
- case EC_SPI_RX_BAD_DATA:
- case EC_SPI_NOT_READY:
- ret = -EAGAIN;
- ec_msg->result = EC_RES_IN_PROGRESS;
- default:
+ rx_byte = rx_buf[i];
+ if (rx_byte == EC_SPI_PAST_END ||
+ rx_byte == EC_SPI_RX_BAD_DATA ||
+ rx_byte == EC_SPI_NOT_READY) {
+ ret = -EREMOTEIO;
break;
}
- if (ret)
- break;
}
- if (!ret)
- ret = cros_ec_spi_receive_packet(ec_dev,
- ec_msg->insize + sizeof(*response));
- } else {
- dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
}
+ if (!ret)
+ ret = cros_ec_spi_receive_packet(ec_dev,
+ ec_msg->insize + sizeof(*response));
+ else
+ dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+
final_ret = terminate_request(ec_dev);
spi_bus_unlock(ec_spi->spi->master);
@@ -508,20 +502,18 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
int i, len;
u8 *ptr;
u8 *rx_buf;
+ u8 rx_byte;
int sum;
int ret = 0, final_ret;
+ unsigned long delay;
len = cros_ec_prepare_tx(ec_dev, ec_msg);
dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
/* If it's too soon to do another transaction, wait */
- if (ec_spi->last_transfer_ns) {
- unsigned long delay; /* The delay completed so far */
-
- delay = ktime_get_ns() - ec_spi->last_transfer_ns;
- if (delay < EC_SPI_RECOVERY_TIME_NS)
- ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
- }
+ delay = ktime_get_ns() - ec_spi->last_transfer_ns;
+ if (delay < EC_SPI_RECOVERY_TIME_NS)
+ ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
rx_buf = kzalloc(len, GFP_KERNEL);
if (!rx_buf)
@@ -544,25 +536,22 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
if (!ret) {
/* Verify that EC can process command */
for (i = 0; i < len; i++) {
- switch (rx_buf[i]) {
- case EC_SPI_PAST_END:
- case EC_SPI_RX_BAD_DATA:
- case EC_SPI_NOT_READY:
- ret = -EAGAIN;
- ec_msg->result = EC_RES_IN_PROGRESS;
- default:
+ rx_byte = rx_buf[i];
+ if (rx_byte == EC_SPI_PAST_END ||
+ rx_byte == EC_SPI_RX_BAD_DATA ||
+ rx_byte == EC_SPI_NOT_READY) {
+ ret = -EREMOTEIO;
break;
}
- if (ret)
- break;
}
- if (!ret)
- ret = cros_ec_spi_receive_response(ec_dev,
- ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
- } else {
- dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
}
+ if (!ret)
+ ret = cros_ec_spi_receive_response(ec_dev,
+ ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
+ else
+ dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+
final_ret = terminate_request(ec_dev);
spi_bus_unlock(ec_spi->spi->master);
@@ -667,6 +656,7 @@ static int cros_ec_spi_probe(struct spi_device *spi)
sizeof(struct ec_response_get_protocol_info);
ec_dev->dout_size = sizeof(struct ec_host_request);
+ ec_spi->last_transfer_ns = ktime_get_ns();
err = cros_ec_register(ec_dev);
if (err) {
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 0e0ab9b..9e545eb 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -450,6 +450,8 @@ int intel_lpss_probe(struct device *dev,
if (ret)
goto err_remove_ltr;
+ dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
+
return 0;
err_remove_ltr:
@@ -478,7 +480,9 @@ EXPORT_SYMBOL_GPL(intel_lpss_remove);
static int resume_lpss_device(struct device *dev, void *data)
{
- pm_runtime_resume(dev);
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ pm_runtime_resume(dev);
+
return 0;
}
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index 36adf9e..274306d 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -16,7 +16,6 @@
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
-#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/mfd/core.h>
#include <linux/i2c.h>
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 55d824b..390b27c 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -458,7 +458,7 @@ static int kempld_probe(struct platform_device *pdev)
return -EINVAL;
pld->io_base = devm_ioport_map(dev, ioport->start,
- ioport->end - ioport->start);
+ resource_size(ioport));
if (!pld->io_base)
return -ENOMEM;
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index cf1120a..53dc1a4 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -1143,11 +1143,6 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
res->end = res->start + SPIBASE_APL_SZ - 1;
pci_bus_read_config_dword(bus, spi, BCR, &bcr);
- if (!(bcr & BCR_WPD)) {
- bcr |= BCR_WPD;
- pci_bus_write_config_dword(bus, spi, BCR, bcr);
- pci_bus_read_config_dword(bus, spi, BCR, &bcr);
- }
info->writeable = !!(bcr & BCR_WPD);
}
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
index dc5caea..da9612d 100644
--- a/drivers/mfd/max77843.c
+++ b/drivers/mfd/max77843.c
@@ -15,7 +15,6 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77843-private.h>
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 3922a93..663a239 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -430,6 +430,7 @@ static void palmas_power_off(void)
{
unsigned int addr;
int ret, slave;
+ u8 powerhold_mask;
struct device_node *np = palmas_dev->dev->of_node;
if (of_property_read_bool(np, "ti,palmas-override-powerhold")) {
@@ -437,8 +438,15 @@ static void palmas_power_off(void)
PALMAS_PRIMARY_SECONDARY_PAD2);
slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE);
+ if (of_device_is_compatible(np, "ti,tps65917"))
+ powerhold_mask =
+ TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK;
+ else
+ powerhold_mask =
+ PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK;
+
ret = regmap_update_bits(palmas_dev->regmap[slave], addr,
- PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK, 0);
+ powerhold_mask, 0);
if (ret)
dev_err(palmas_dev->dev,
"Unable to write PRIMARY_SECONDARY_PAD2 %d\n",
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 6155d12..f952dff 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -149,7 +149,7 @@ pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
*pdev = platform_device_alloc(name, -1);
if (!*pdev) {
- dev_err(pcf->dev, "Falied to allocate %s\n", name);
+ dev_err(pcf->dev, "Failed to allocate %s\n", name);
return;
}
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
new file mode 100644
index 0000000..5c858e7
--- /dev/null
+++ b/drivers/mfd/rave-sp.c
@@ -0,0 +1,710 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Multifunction core driver for Zodiac Inflight Innovations RAVE
+ * Supervisory Processor (SP) MCU that is connected via a dedicated
+ * UART port
+ *
+ * Copyright (C) 2017 Zodiac Inflight Innovations
+ */
+
+#include <linux/atomic.h>
+#include <linux/crc-ccitt.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rave-sp.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/sched.h>
+#include <linux/serdev.h>
+#include <asm/unaligned.h>
+
+/*
+ * The UART protocol uses the following entities:
+ * - message to MCU => ACK response
+ * - event from MCU => event ACK
+ *
+ * Frame structure:
+ * <STX> <DATA> <CHECKSUM> <ETX>
+ * Where:
+ * - STX - start-of-transmission character
+ * - ETX - end-of-transmission character
+ * - DATA - payload
+ * - CHECKSUM - checksum calculated on <DATA>
+ *
+ * If <DATA> or <CHECKSUM> contains one of the control characters, it is
+ * escaped using the <DLE> control code. The added <DLE> does not
+ * participate in checksum calculation.
+ */
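+/*
+ * A hypothetical worked example (assuming the single-byte 8b2c
+ * checksum used by the legacy variants below): for the payload
+ * bytes A0 01 the checksum is 5F, so the frame on the wire is
+ *
+ *     02  A0 01  5F  03
+ *    STX [DATA] CSUM ETX
+ *
+ * None of these bytes collide with STX/ETX/DLE, so no escaping is
+ * needed here.
+ */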
+#define RAVE_SP_STX 0x02
+#define RAVE_SP_ETX 0x03
+#define RAVE_SP_DLE 0x10
+
+#define RAVE_SP_MAX_DATA_SIZE 64
+#define RAVE_SP_CHECKSUM_SIZE 2 /* Worst case scenario on RDU2 */
+/*
+ * We don't store STX, ETX and the escape (DLE) bytes, so the Rx buffer
+ * needs to hold only DATA + CSUM
+ */
+#define RAVE_SP_RX_BUFFER_SIZE \
+ (RAVE_SP_MAX_DATA_SIZE + RAVE_SP_CHECKSUM_SIZE)
+
+#define RAVE_SP_STX_ETX_SIZE 2
+/*
+ * For Tx we have to have space for everything: STX, ETX and the
+ * potentially stuffed (escaped) DATA + CSUM bytes
+ */
+#define RAVE_SP_TX_BUFFER_SIZE \
+ (RAVE_SP_STX_ETX_SIZE + 2 * RAVE_SP_RX_BUFFER_SIZE)
+
+#define RAVE_SP_BOOT_SOURCE_GET 0
+#define RAVE_SP_BOOT_SOURCE_SET 1
+
+#define RAVE_SP_RDU2_BOARD_TYPE_RMB 0
+#define RAVE_SP_RDU2_BOARD_TYPE_DEB 1
+
+#define RAVE_SP_BOOT_SOURCE_SD 0
+#define RAVE_SP_BOOT_SOURCE_EMMC 1
+#define RAVE_SP_BOOT_SOURCE_NOR 2
+
+/**
+ * enum rave_sp_deframer_state - Possible state for de-framer
+ *
+ * @RAVE_SP_EXPECT_SOF: Scanning input for start-of-frame marker
+ * @RAVE_SP_EXPECT_DATA: Got start of frame marker, collecting frame
+ * @RAVE_SP_EXPECT_ESCAPED_DATA: Got escape character, collecting escaped byte
+ */
+enum rave_sp_deframer_state {
+ RAVE_SP_EXPECT_SOF,
+ RAVE_SP_EXPECT_DATA,
+ RAVE_SP_EXPECT_ESCAPED_DATA,
+};
+
+/**
+ * struct rave_sp_deframer - Device protocol deframer
+ *
+ * @state: Current state of the deframer
+ * @data: Buffer used to collect deframed data
+ * @length: Number of bytes de-framed so far
+ */
+struct rave_sp_deframer {
+ enum rave_sp_deframer_state state;
+ unsigned char data[RAVE_SP_RX_BUFFER_SIZE];
+ size_t length;
+};
+
+/**
+ * struct rave_sp_reply - Reply as per RAVE device protocol
+ *
+ * @length: Expected reply length
+ * @data: Buffer to store reply payload in
+ * @code: Expected reply code
+ * @ackid: Expected reply ACK ID
+ * @received: Completed upon successful reply reception
+ */
+struct rave_sp_reply {
+ size_t length;
+ void *data;
+ u8 code;
+ u8 ackid;
+ struct completion received;
+};
+
+/**
+ * struct rave_sp_checksum - Variant specific checksum implementation details
+ *
+ * @length: Calculated checksum length
+ * @subroutine: Utilized checksum algorithm implementation
+ */
+struct rave_sp_checksum {
+ size_t length;
+ void (*subroutine)(const u8 *, size_t, u8 *);
+};
+
+/**
+ * struct rave_sp_variant_cmds - Variant specific command routines
+ *
+ * @translate: Generic to variant specific command mapping routine
+ *
+ */
+struct rave_sp_variant_cmds {
+ int (*translate)(enum rave_sp_command);
+};
+
+/**
+ * struct rave_sp_variant - RAVE supervisory processor core variant
+ *
+ * @checksum: Variant specific checksum implementation
+ * @cmd: Variant specific command pointer table
+ *
+ */
+struct rave_sp_variant {
+ const struct rave_sp_checksum *checksum;
+ struct rave_sp_variant_cmds cmd;
+};
+
+/**
+ * struct rave_sp - RAVE supervisory processor core
+ *
+ * @serdev: Pointer to underlying serdev
+ * @deframer: Stored state of the protocol deframer
+ * @ackid: ACK ID used in last reply sent to the device
+ * @bus_lock: Lock to serialize access to the device
+ * @reply_lock: Lock protecting @reply
+ * @reply: Pointer to memory to store reply payload
+ *
+ * @variant: Device variant specific information
+ * @event_notifier_list: Input event notification chain
+ *
+ */
+struct rave_sp {
+ struct serdev_device *serdev;
+ struct rave_sp_deframer deframer;
+ atomic_t ackid;
+ struct mutex bus_lock;
+ struct mutex reply_lock;
+ struct rave_sp_reply *reply;
+
+ const struct rave_sp_variant *variant;
+ struct blocking_notifier_head event_notifier_list;
+};
+
+static bool rave_sp_id_is_event(u8 code)
+{
+ return (code & 0xF0) == RAVE_SP_EVNT_BASE;
+}
+
+static void rave_sp_unregister_event_notifier(struct device *dev, void *res)
+{
+ struct rave_sp *sp = dev_get_drvdata(dev->parent);
+ struct notifier_block *nb = *(struct notifier_block **)res;
+ struct blocking_notifier_head *bnh = &sp->event_notifier_list;
+
+ WARN_ON(blocking_notifier_chain_unregister(bnh, nb));
+}
+
+int devm_rave_sp_register_event_notifier(struct device *dev,
+ struct notifier_block *nb)
+{
+ struct rave_sp *sp = dev_get_drvdata(dev->parent);
+ struct notifier_block **rcnb;
+ int ret;
+
+ rcnb = devres_alloc(rave_sp_unregister_event_notifier,
+ sizeof(*rcnb), GFP_KERNEL);
+ if (!rcnb)
+ return -ENOMEM;
+
+ ret = blocking_notifier_chain_register(&sp->event_notifier_list, nb);
+ if (!ret) {
+ *rcnb = nb;
+ devres_add(dev, rcnb);
+ } else {
+ devres_free(rcnb);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_rave_sp_register_event_notifier);
+
+static void csum_8b2c(const u8 *buf, size_t size, u8 *crc)
+{
+ *crc = *buf++;
+ size--;
+
+ while (size--)
+ *crc += *buf++;
+
+ *crc = 1 + ~(*crc);
+}
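+
+/*
+ * csum_8b2c() is the two's complement of the 8-bit sum, so the sum
+ * of all payload bytes plus the checksum is 0 modulo 256. For a
+ * hypothetical payload {0xA0, 0x01}: the sum is 0xA1, the checksum
+ * is 0x5F, and 0xA1 + 0x5F == 0x100 == 0 (mod 256).
+ */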
+
+static void csum_ccitt(const u8 *buf, size_t size, u8 *crc)
+{
+ const u16 calculated = crc_ccitt_false(0xffff, buf, size);
+
+ /*
+ * While the rest of the wire protocol is little-endian,
+ * the CCITT-16 CRC on the RDU2 device is sent out in big-endian order.
+ */
+ put_unaligned_be16(calculated, crc);
+}
+
+static void *stuff(unsigned char *dest, const unsigned char *src, size_t n)
+{
+ while (n--) {
+ const unsigned char byte = *src++;
+
+ switch (byte) {
+ case RAVE_SP_STX:
+ case RAVE_SP_ETX:
+ case RAVE_SP_DLE:
+ *dest++ = RAVE_SP_DLE;
+ /* FALLTHROUGH */
+ default:
+ *dest++ = byte;
+ }
+ }
+
+ return dest;
+}
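+
+/*
+ * A hypothetical example: stuffing the two bytes {0x02, 0x41} yields
+ * {RAVE_SP_DLE, 0x02, 0x41} - the 0x02 collides with STX and gets a
+ * DLE prepended, while 0x41 passes through unchanged. Output can
+ * thus grow up to twice the input size, which is exactly what
+ * RAVE_SP_TX_BUFFER_SIZE accounts for.
+ */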
+
+static int rave_sp_write(struct rave_sp *sp, const u8 *data, u8 data_size)
+{
+ const size_t checksum_length = sp->variant->checksum->length;
+ unsigned char frame[RAVE_SP_TX_BUFFER_SIZE];
+ unsigned char crc[RAVE_SP_CHECKSUM_SIZE];
+ unsigned char *dest = frame;
+ size_t length;
+
+ if (WARN_ON(checksum_length > sizeof(crc)))
+ return -ENOMEM;
+
+ if (WARN_ON(data_size > sizeof(frame)))
+ return -ENOMEM;
+
+ sp->variant->checksum->subroutine(data, data_size, crc);
+
+ *dest++ = RAVE_SP_STX;
+ dest = stuff(dest, data, data_size);
+ dest = stuff(dest, crc, checksum_length);
+ *dest++ = RAVE_SP_ETX;
+
+ length = dest - frame;
+
+ print_hex_dump(KERN_DEBUG, "rave-sp tx: ", DUMP_PREFIX_NONE,
+ 16, 1, frame, length, false);
+
+ return serdev_device_write(sp->serdev, frame, length, HZ);
+}
+
+static u8 rave_sp_reply_code(u8 command)
+{
+ /*
+ * There isn't a single rule that describes the command code ->
+ * ACK code transformation but, going through various versions
+ * of the ICDs, there appear to be three distinct groups, each
+ * described by a simple transformation.
+ */
+ switch (command) {
+ case 0xA0 ... 0xBE:
+ /*
+ * Commands implemented by firmware found in RDU1 and
+ * older devices all seem to obey the following rule
+ */
+ return command + 0x20;
+ case 0xE0 ... 0xEF:
+ /*
+ * Events emitted by all versions of the firmware use the
+ * least significant bit to form the ACK code
+ */
+ return command | 0x01;
+ default:
+ /*
+ * Commands implemented by firmware found in RDU2 are
+ * similar to "old" commands, but they use slightly
+ * different offset
+ */
+ return command + 0x40;
+ }
+}
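+
+/*
+ * Illustrative mappings implied by the above (hypothetical command
+ * codes): 0xA0 -> 0xC0 (+0x20, RDU1 group), 0xE2 -> 0xE3 (|0x01,
+ * event group), 0x11 -> 0x51 (+0x40, RDU2 group).
+ */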
+
+int rave_sp_exec(struct rave_sp *sp,
+ void *__data, size_t data_size,
+ void *reply_data, size_t reply_data_size)
+{
+ struct rave_sp_reply reply = {
+ .data = reply_data,
+ .length = reply_data_size,
+ .received = COMPLETION_INITIALIZER_ONSTACK(reply.received),
+ };
+ unsigned char *data = __data;
+ int command, ret = 0;
+ u8 ackid;
+
+ command = sp->variant->cmd.translate(data[0]);
+ if (command < 0)
+ return command;
+
+ ackid = atomic_inc_return(&sp->ackid);
+ reply.ackid = ackid;
+ reply.code = rave_sp_reply_code((u8)command);
+
+ mutex_lock(&sp->bus_lock);
+
+ mutex_lock(&sp->reply_lock);
+ sp->reply = &reply;
+ mutex_unlock(&sp->reply_lock);
+
+ data[0] = command;
+ data[1] = ackid;
+
+ rave_sp_write(sp, data, data_size);
+
+ if (!wait_for_completion_timeout(&reply.received, HZ)) {
+ dev_err(&sp->serdev->dev, "Command timeout\n");
+ ret = -ETIMEDOUT;
+
+ mutex_lock(&sp->reply_lock);
+ sp->reply = NULL;
+ mutex_unlock(&sp->reply_lock);
+ }
+
+ mutex_unlock(&sp->bus_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rave_sp_exec);
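+
+/*
+ * A minimal usage sketch for rave_sp_exec(); the caller below is
+ * hypothetical and not part of this driver. data[0] carries the
+ * generic command code and data[1] the ACK ID, both of which
+ * rave_sp_exec() rewrites before transmission; the reply payload
+ * lands in the last two arguments.
+ */
+#if 0
+static int example_get_reset_reason(struct rave_sp *sp, u8 *reason)
+{
+ u8 cmd[] = {
+  [0] = RAVE_SP_CMD_RESET_REASON,
+  [1] = 0, /* ACK ID, filled in by rave_sp_exec() */
+ };
+
+ return rave_sp_exec(sp, cmd, sizeof(cmd), reason, sizeof(*reason));
+}
+#endif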
+
+static void rave_sp_receive_event(struct rave_sp *sp,
+ const unsigned char *data, size_t length)
+{
+ u8 cmd[] = {
+ [0] = rave_sp_reply_code(data[0]),
+ [1] = data[1],
+ };
+
+ rave_sp_write(sp, cmd, sizeof(cmd));
+
+ blocking_notifier_call_chain(&sp->event_notifier_list,
+ rave_sp_action_pack(data[0], data[2]),
+ NULL);
+}
+
+static void rave_sp_receive_reply(struct rave_sp *sp,
+ const unsigned char *data, size_t length)
+{
+ struct device *dev = &sp->serdev->dev;
+ struct rave_sp_reply *reply;
+ const size_t payload_length = length - 2;
+
+ mutex_lock(&sp->reply_lock);
+ reply = sp->reply;
+
+ if (reply) {
+ if (reply->code == data[0] && reply->ackid == data[1] &&
+ payload_length >= reply->length) {
+ /*
+ * We are relying on memcpy(dst, src, 0) to be a no-op
+ * when handling commands whose reply carries no payload
+ */
+ memcpy(reply->data, &data[2], reply->length);
+ complete(&reply->received);
+ sp->reply = NULL;
+ } else {
+ dev_err(dev, "Ignoring incorrect reply\n");
+ dev_dbg(dev, "Code: expected = 0x%08x received = 0x%08x\n",
+ reply->code, data[0]);
+ dev_dbg(dev, "ACK ID: expected = 0x%08x received = 0x%08x\n",
+ reply->ackid, data[1]);
+ dev_dbg(dev, "Length: expected = %zu received = %zu\n",
+ reply->length, payload_length);
+ }
+ }
+
+ mutex_unlock(&sp->reply_lock);
+}
+
+static void rave_sp_receive_frame(struct rave_sp *sp,
+ const unsigned char *data,
+ size_t length)
+{
+ const size_t checksum_length = sp->variant->checksum->length;
+ const size_t payload_length = length - checksum_length;
+ const u8 *crc_reported = &data[payload_length];
+ struct device *dev = &sp->serdev->dev;
+ u8 crc_calculated[checksum_length];
+
+ print_hex_dump(KERN_DEBUG, "rave-sp rx: ", DUMP_PREFIX_NONE,
+ 16, 1, data, length, false);
+
+ if (unlikely(length <= checksum_length)) {
+ dev_warn(dev, "Dropping short frame\n");
+ return;
+ }
+
+ sp->variant->checksum->subroutine(data, payload_length,
+ crc_calculated);
+
+ if (memcmp(crc_calculated, crc_reported, checksum_length)) {
+ dev_warn(dev, "Dropping bad frame\n");
+ return;
+ }
+
+ if (rave_sp_id_is_event(data[0]))
+ rave_sp_receive_event(sp, data, length);
+ else
+ rave_sp_receive_reply(sp, data, length);
+}
+
+static int rave_sp_receive_buf(struct serdev_device *serdev,
+ const unsigned char *buf, size_t size)
+{
+ struct device *dev = &serdev->dev;
+ struct rave_sp *sp = dev_get_drvdata(dev);
+ struct rave_sp_deframer *deframer = &sp->deframer;
+ const unsigned char *src = buf;
+ const unsigned char *end = buf + size;
+
+ while (src < end) {
+ const unsigned char byte = *src++;
+
+ switch (deframer->state) {
+ case RAVE_SP_EXPECT_SOF:
+ if (byte == RAVE_SP_STX)
+ deframer->state = RAVE_SP_EXPECT_DATA;
+ break;
+
+ case RAVE_SP_EXPECT_DATA:
+ /*
+ * Treat special byte values first
+ */
+ switch (byte) {
+ case RAVE_SP_ETX:
+ rave_sp_receive_frame(sp,
+ deframer->data,
+ deframer->length);
+ /*
+ * Once we have extracted a complete
+ * frame from the stream, we are done:
+ * bail out and reset the framer to
+ * its initial state, regardless of
+ * whether we have consumed all of
+ * the stream or not.
+ */
+ goto reset_framer;
+ case RAVE_SP_STX:
+ dev_warn(dev, "Bad frame: STX before ETX\n");
+ /*
+ * If we encounter a second "start of
+ * frame" marker before seeing the
+ * corresponding "end of frame", we
+ * reset the framer and ignore both
+ * the frame started by the first SOF
+ * and the frame started by the
+ * current SOF.
+ *
+ * NOTE: This means that only a frame
+ * started by a third SOF, sent after
+ * this one, will have a chance to
+ * get through.
+ */
+ goto reset_framer;
+ case RAVE_SP_DLE:
+ deframer->state = RAVE_SP_EXPECT_ESCAPED_DATA;
+ /*
+ * If we encounter an escape sequence,
+ * we skip it and collect the byte
+ * that follows, by forcing the next
+ * iteration of the enclosing while
+ * loop.
+ */
+ continue;
+ }
+ /*
+ * All other bytes, which need no special
+ * handling, are treated the same way as
+ * escaped data - collected into the
+ * deframer buffer
+ */
+
+ /* FALLTHROUGH */
+
+ case RAVE_SP_EXPECT_ESCAPED_DATA:
+ deframer->data[deframer->length++] = byte;
+
+ if (deframer->length == sizeof(deframer->data)) {
+ dev_warn(dev, "Bad frame: Too long\n");
+ /*
+ * If the amount of data accumulated
+ * for the current frame exceeds the
+ * capacity of the deframer's buffer,
+ * there is nothing we can do but
+ * discard that data and start
+ * assembling a new frame
+ */
+ goto reset_framer;
+ }
+
+ /*
+ * We have collected the escaped byte; now we
+ * can go back to regular data collection
+ */
+ deframer->state = RAVE_SP_EXPECT_DATA;
+ break;
+ }
+ }
+
+ /*
+ * The only way to get out of the above loop and end up here
+ * is through consuming all of the supplied data, so here we
+ * report that we processed it all.
+ */
+ return size;
+
+reset_framer:
+ /*
+ * NOTE: A number of codepaths that will drop us here will do
+ * so before consuming all 'size' bytes of the data passed by
+ * the serdev layer. We rely on the fact that the serdev layer
+ * will re-execute this handler with the remainder of the Rx
+ * bytes once we report the actual number of bytes processed.
+ */
+ deframer->state = RAVE_SP_EXPECT_SOF;
+ deframer->length = 0;
+
+ return src - buf;
+}
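+
+/*
+ * A hypothetical deframer walk-through for the Rx byte stream
+ * 02 A0 10 02 5E 03 (8b2c checksum):
+ *
+ *   02 - SOF, EXPECT_SOF -> EXPECT_DATA
+ *   A0 - collected (data[0])
+ *   10 - DLE, EXPECT_DATA -> EXPECT_ESCAPED_DATA
+ *   02 - collected verbatim (data[1]), back to EXPECT_DATA
+ *   5E - collected (data[2])
+ *   03 - ETX, frame {A0 02 5E} delivered, framer reset
+ *
+ * csum_8b2c({A0, 02}) is 0x5E, so the delivered frame passes
+ * checksum validation in rave_sp_receive_frame().
+ */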
+
+static int rave_sp_rdu1_cmd_translate(enum rave_sp_command command)
+{
+ if (command >= RAVE_SP_CMD_STATUS &&
+ command <= RAVE_SP_CMD_CONTROL_EVENTS)
+ return command;
+
+ return -EINVAL;
+}
+
+static int rave_sp_rdu2_cmd_translate(enum rave_sp_command command)
+{
+ if (command >= RAVE_SP_CMD_GET_FIRMWARE_VERSION &&
+ command <= RAVE_SP_CMD_GET_GPIO_STATE)
+ return command;
+
+ if (command == RAVE_SP_CMD_REQ_COPPER_REV) {
+ /*
+ * As per RDU2 ICD 3.4.47, the CMD_GET_COPPER_REV code is
+ * different from that of RDU1 and is set to 0x28.
+ */
+ return 0x28;
+ }
+
+ return rave_sp_rdu1_cmd_translate(command);
+}
+
+static int rave_sp_default_cmd_translate(enum rave_sp_command command)
+{
+ /*
+ * All of the following command codes were taken from "Table :
+ * Communications Protocol Message Types" in section 3.3
+ * "MESSAGE TYPES" of Rave PIC24 ICD.
+ */
+ switch (command) {
+ case RAVE_SP_CMD_GET_FIRMWARE_VERSION:
+ return 0x11;
+ case RAVE_SP_CMD_GET_BOOTLOADER_VERSION:
+ return 0x12;
+ case RAVE_SP_CMD_BOOT_SOURCE:
+ return 0x14;
+ case RAVE_SP_CMD_SW_WDT:
+ return 0x1C;
+ case RAVE_SP_CMD_RESET:
+ return 0x1E;
+ case RAVE_SP_CMD_RESET_REASON:
+ return 0x1F;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct rave_sp_checksum rave_sp_checksum_8b2c = {
+ .length = 1,
+ .subroutine = csum_8b2c,
+};
+
+static const struct rave_sp_checksum rave_sp_checksum_ccitt = {
+ .length = 2,
+ .subroutine = csum_ccitt,
+};
+
+static const struct rave_sp_variant rave_sp_legacy = {
+ .checksum = &rave_sp_checksum_8b2c,
+ .cmd = {
+ .translate = rave_sp_default_cmd_translate,
+ },
+};
+
+static const struct rave_sp_variant rave_sp_rdu1 = {
+ .checksum = &rave_sp_checksum_8b2c,
+ .cmd = {
+ .translate = rave_sp_rdu1_cmd_translate,
+ },
+};
+
+static const struct rave_sp_variant rave_sp_rdu2 = {
+ .checksum = &rave_sp_checksum_ccitt,
+ .cmd = {
+ .translate = rave_sp_rdu2_cmd_translate,
+ },
+};
+
+static const struct of_device_id rave_sp_dt_ids[] = {
+ { .compatible = "zii,rave-sp-niu", .data = &rave_sp_legacy },
+ { .compatible = "zii,rave-sp-mezz", .data = &rave_sp_legacy },
+ { .compatible = "zii,rave-sp-esb", .data = &rave_sp_legacy },
+ { .compatible = "zii,rave-sp-rdu1", .data = &rave_sp_rdu1 },
+ { .compatible = "zii,rave-sp-rdu2", .data = &rave_sp_rdu2 },
+ { /* sentinel */ }
+};
+
+static const struct serdev_device_ops rave_sp_serdev_device_ops = {
+ .receive_buf = rave_sp_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static int rave_sp_probe(struct serdev_device *serdev)
+{
+ struct device *dev = &serdev->dev;
+ struct rave_sp *sp;
+ u32 baud;
+ int ret;
+
+ if (of_property_read_u32(dev->of_node, "current-speed", &baud)) {
+ dev_err(dev,
+ "'current-speed' is not specified in device node\n");
+ return -EINVAL;
+ }
+
+ sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
+ if (!sp)
+ return -ENOMEM;
+
+ sp->serdev = serdev;
+ dev_set_drvdata(dev, sp);
+
+ sp->variant = of_device_get_match_data(dev);
+ if (!sp->variant)
+ return -ENODEV;
+
+ mutex_init(&sp->bus_lock);
+ mutex_init(&sp->reply_lock);
+ BLOCKING_INIT_NOTIFIER_HEAD(&sp->event_notifier_list);
+
+ serdev_device_set_client_ops(serdev, &rave_sp_serdev_device_ops);
+ ret = devm_serdev_device_open(dev, serdev);
+ if (ret)
+ return ret;
+
+ serdev_device_set_baudrate(serdev, baud);
+
+ return devm_of_platform_populate(dev);
+}
+
+MODULE_DEVICE_TABLE(of, rave_sp_dt_ids);
+
+static struct serdev_device_driver rave_sp_drv = {
+ .probe = rave_sp_probe,
+ .driver = {
+ .name = "rave-sp",
+ .of_match_table = rave_sp_dt_ids,
+ },
+};
+module_serdev_device_driver(rave_sp_drv);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Vostrikov <andrey.vostrikov@cogentembedded.com>");
+MODULE_AUTHOR("Nikita Yushchenko <nikita.yoush@cogentembedded.com>");
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_DESCRIPTION("RAVE SP core driver");
diff --git a/drivers/mfd/stm32-lptimer.c b/drivers/mfd/stm32-lptimer.c
index 075330a..a00f99f 100644
--- a/drivers/mfd/stm32-lptimer.c
+++ b/drivers/mfd/stm32-lptimer.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* STM32 Low-Power Timer parent driver.
- *
* Copyright (C) STMicroelectronics 2017
- *
* Author: Fabrice Gasnier <fabrice.gasnier@st.com>
- *
* Inspired by Benjamin Gaignard's stm32-timers driver
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/mfd/stm32-lptimer.h>
diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
index a6675a4..1d347e5 100644
--- a/drivers/mfd/stm32-timers.c
+++ b/drivers/mfd/stm32-timers.c
@@ -1,9 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics 2016
- *
* Author: Benjamin Gaignard <benjamin.gaignard@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/mfd/stm32-timers.h>
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index b93fe4c..7eaa40b 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -13,6 +13,7 @@
*/
#include <linux/err.h>
+#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/list.h>
@@ -87,6 +88,24 @@ static struct syscon *of_syscon_register(struct device_node *np)
if (ret)
reg_io_width = 4;
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret > 0 || (IS_ENABLED(CONFIG_HWSPINLOCK) && ret == 0)) {
+ syscon_config.use_hwlock = true;
+ syscon_config.hwlock_id = ret;
+ syscon_config.hwlock_mode = HWLOCK_IRQSTATE;
+ } else if (ret < 0) {
+ switch (ret) {
+ case -ENOENT:
+ /* Ignore missing hwlock, it's optional. */
+ break;
+ default:
+ pr_err("Failed to retrieve valid hwlock: %d\n", ret);
+ /* fall-through */
+ case -EPROBE_DEFER:
+ goto err_regmap;
+ }
+ }
+
syscon_config.reg_stride = reg_io_width;
syscon_config.val_bits = reg_io_width * 8;
syscon_config.max_register = resource_size(&res) - reg_io_width;
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 0f3fab4..3cd958a 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -124,7 +124,7 @@ static int ti_tscadc_probe(struct platform_device *pdev)
struct ti_tscadc_dev *tscadc;
struct resource *res;
struct clk *clk;
- struct device_node *node = pdev->dev.of_node;
+ struct device_node *node;
struct mfd_cell *cell;
struct property *prop;
const __be32 *cur;
diff --git a/drivers/mfd/tmio_core.c b/drivers/mfd/tmio_core.c
index 83af78c..ebf54cc 100644
--- a/drivers/mfd/tmio_core.c
+++ b/drivers/mfd/tmio_core.c
@@ -9,6 +9,26 @@
#include <linux/export.h>
#include <linux/mfd/tmio.h>
+#define CNF_CMD 0x04
+#define CNF_CTL_BASE 0x10
+#define CNF_INT_PIN 0x3d
+#define CNF_STOP_CLK_CTL 0x40
+#define CNF_GCLK_CTL 0x41
+#define CNF_SD_CLK_MODE 0x42
+#define CNF_PIN_STATUS 0x44
+#define CNF_PWR_CTL_1 0x48
+#define CNF_PWR_CTL_2 0x49
+#define CNF_PWR_CTL_3 0x4a
+#define CNF_CARD_DETECT_MODE 0x4c
+#define CNF_SD_SLOT 0x50
+#define CNF_EXT_GCLK_CTL_1 0xf0
+#define CNF_EXT_GCLK_CTL_2 0xf1
+#define CNF_EXT_GCLK_CTL_3 0xf9
+#define CNF_SD_LED_EN_1 0xfa
+#define CNF_SD_LED_EN_2 0xfe
+
+#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG) */
+
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base)
{
/* Enable the MMC/SD Control registers */
diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c
index da16bf4..dc94ffc 100644
--- a/drivers/mfd/twl4030-audio.c
+++ b/drivers/mfd/twl4030-audio.c
@@ -159,13 +159,18 @@ unsigned int twl4030_audio_get_mclk(void)
EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk);
static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata,
- struct device_node *node)
+ struct device_node *parent)
{
+ struct device_node *node;
+
if (pdata && pdata->codec)
return true;
- if (of_find_node_by_name(node, "codec"))
+ node = of_get_child_by_name(parent, "codec");
+ if (node) {
+ of_node_put(node);
return true;
+ }
return false;
}
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index d66502d..dd19f17 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -97,12 +97,16 @@ static struct reg_sequence twl6040_patch[] = {
};
-static bool twl6040_has_vibra(struct device_node *node)
+static bool twl6040_has_vibra(struct device_node *parent)
{
-#ifdef CONFIG_OF
- if (of_find_node_by_name(node, "vibra"))
+ struct device_node *node;
+
+ node = of_get_child_by_name(parent, "vibra");
+ if (node) {
+ of_node_put(node);
return true;
-#endif
+ }
+
return false;
}
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index f1a5c23..7c0fa24 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -496,6 +496,10 @@ config PCI_ENDPOINT_TEST
Enable this configuration option to enable the host side test driver
for PCI Endpoint.
+config MISC_RTSX
+ tristate
+ default MISC_RTSX_PCI || MISC_RTSX_USB
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
@@ -508,4 +512,5 @@ source "drivers/misc/mic/Kconfig"
source "drivers/misc/genwqe/Kconfig"
source "drivers/misc/echo/Kconfig"
source "drivers/misc/cxl/Kconfig"
+source "drivers/misc/cardreader/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 5ca5f64..8d8cc09 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
+obj-$(CONFIG_MISC_RTSX) += cardreader/
lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o
diff --git a/drivers/misc/cardreader/Kconfig b/drivers/misc/cardreader/Kconfig
new file mode 100644
index 0000000..69e815e
--- /dev/null
+++ b/drivers/misc/cardreader/Kconfig
@@ -0,0 +1,20 @@
+config MISC_RTSX_PCI
+ tristate "Realtek PCI-E card reader"
+ depends on PCI
+ select MFD_CORE
+ help
+ This adds support for Realtek PCI-Express card readers, including rts5209,
+ rts5227, rts522A, rts5229, rts5249, rts524A, rts525A, rtl8411, rts5260.
+ Realtek card readers support access to many types of memory cards,
+ such as Memory Stick, Memory Stick Pro, Secure Digital and
+ MultiMediaCard.
+
+config MISC_RTSX_USB
+ tristate "Realtek USB card reader"
+ depends on USB
+ select MFD_CORE
+ help
+ Select this option to get support for Realtek USB 2.0 card readers
+ including RTS5129, RTS5139, RTS5179 and RTS5170.
+ Realtek card readers support access to many types of memory cards,
+ such as Memory Stick Pro, Secure Digital and MultiMediaCard.
diff --git a/drivers/misc/cardreader/Makefile b/drivers/misc/cardreader/Makefile
new file mode 100644
index 0000000..9fabfcc
--- /dev/null
+++ b/drivers/misc/cardreader/Makefile
@@ -0,0 +1,4 @@
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o
+
+obj-$(CONFIG_MISC_RTSX_PCI) += rtsx_pci.o
+obj-$(CONFIG_MISC_RTSX_USB) += rtsx_usb.o
diff --git a/drivers/mfd/rtl8411.c b/drivers/misc/cardreader/rtl8411.c
index b3ae659..434fd07 100644
--- a/drivers/mfd/rtl8411.c
+++ b/drivers/misc/cardreader/rtl8411.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5209.c b/drivers/misc/cardreader/rts5209.c
index b95beec..ce68c48 100644
--- a/drivers/mfd/rts5209.c
+++ b/drivers/misc/cardreader/rts5209.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5227.c b/drivers/misc/cardreader/rts5227.c
index ff296a4..024dcba 100644
--- a/drivers/mfd/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -22,7 +22,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5229.c b/drivers/misc/cardreader/rts5229.c
index 9ed9dc8..9119261 100644
--- a/drivers/mfd/rts5229.c
+++ b/drivers/misc/cardreader/rts5229.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5249.c b/drivers/misc/cardreader/rts5249.c
index 7fcf37b..dbe013a 100644
--- a/drivers/mfd/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
@@ -738,4 +738,3 @@ void rts525a_init_params(struct rtsx_pcr *pcr)
pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
pcr->ops = &rts525a_pcr_ops;
}
-
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
new file mode 100644
index 0000000..07cb93a
--- /dev/null
+++ b/drivers/misc/cardreader/rts5260.c
@@ -0,0 +1,748 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2016-2017 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Steven FENG <steven_feng@realsil.com.cn>
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rts5260.h"
+#include "rtsx_pcr.h"
+
+static u8 rts5260_get_ic_version(struct rtsx_pcr *pcr)
+{
+ u8 val;
+
+ rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+ return val & IC_VERSION_MASK;
+}
+
+static void rts5260_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+ u8 driving_3v3[6][3] = {
+ {0x94, 0x94, 0x94},
+ {0x11, 0x11, 0x18},
+ {0x55, 0x55, 0x5C},
+ {0x94, 0x94, 0x94},
+ {0x94, 0x94, 0x94},
+ {0xFF, 0xFF, 0xFF},
+ };
+ u8 driving_1v8[6][3] = {
+ {0x9A, 0x89, 0x89},
+ {0xC4, 0xC4, 0xC4},
+ {0x3C, 0x3C, 0x3C},
+ {0x9B, 0x99, 0x99},
+ {0x9A, 0x89, 0x89},
+ {0xFE, 0xFE, 0xFE},
+ };
+ u8 (*driving)[3], drive_sel;
+
+ if (voltage == OUTPUT_3V3) {
+ driving = driving_3v3;
+ drive_sel = pcr->sd30_drive_sel_3v3;
+ } else {
+ driving = driving_1v8;
+ drive_sel = pcr->sd30_drive_sel_1v8;
+ }
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
+ 0xFF, driving[drive_sel][0]);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
+ 0xFF, driving[drive_sel][1]);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
+ 0xFF, driving[drive_sel][2]);
+}
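+
+/*
+ * For example, a hypothetical drive_sel of 2 at 3.3V makes the
+ * writes above program SD30_CLK/CMD/DAT_DRIVE_SEL with 0x55, 0x55
+ * and 0x5C respectively (row 2 of driving_3v3[]).
+ */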
+
+static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+ u32 reg;
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+ if (!rtsx_vendor_setting_valid(reg)) {
+ pcr_dbg(pcr, "skip fetch vendor setting\n");
+ return;
+ }
+
+ pcr->aspm_en = rtsx_reg_to_aspm(reg);
+ pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
+ pcr->card_drive_sel &= 0x3F;
+ pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+ pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
+ if (rtsx_reg_check_reverse_socket(reg))
+ pcr->flags |= PCR_REVERSE_SOCKET;
+}
+
+static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+ /* Set relink_time to 0 */
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
+ RELINK_TIME_MASK, 0);
+
+ if (pm_state == HOST_ENTER_S3)
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+ D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
+ rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
+}
+
+static int rtsx_base_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_EN);
+}
+
+static int rtsx_base_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_DISABLE);
+}
+
+static int rts5260_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, RTS5260_REG_GPIO_CTL0,
+ RTS5260_REG_GPIO_MASK, RTS5260_REG_GPIO_ON);
+}
+
+static int rts5260_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, RTS5260_REG_GPIO_CTL0,
+ RTS5260_REG_GPIO_MASK, RTS5260_REG_GPIO_OFF);
+}
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5260_sd_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0xAA),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5260_sd_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ 0,
+};
+
+/* MS Pull Control Enable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5260_ms_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
+
+/* MS Pull Control Disable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5260_ms_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
+
+static int sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK
+ | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+
+ return 0;
+}
+
+static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ int err = 0;
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en)
+ rtsx_pci_enable_ocp(pcr);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+ DV331812_VDD1, DV331812_VDD1);
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG0,
+ RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1,
+ LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_ON);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+ DV331812_POWERON, DV331812_POWERON);
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+
+ msleep(20);
+
+ if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
+ pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
+ sd_set_sample_push_timing_sd30(pcr);
+
+ /* Initialize SD_CFG1 register */
+ rtsx_pci_write_register(pcr, SD_CFG1, 0xFF,
+ SD_CLK_DIVIDE_128 | SD_20_MODE);
+
+ rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL,
+ 0xFF, SD20_RX_POS_EDGE);
+ rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0);
+ rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+
+ /* Reset SD_CFG3 register */
+ rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
+ rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
+ SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
+ SD30_CLK_STOP_CFG0, 0);
+
+ rtsx_pci_write_register(pcr, REG_PRE_RW_MODE, EN_INFINITE_MODE, 0);
+
+ return err;
+}
+
+static int rts5260_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ switch (voltage) {
+ case OUTPUT_3V3:
+ rtsx_pci_write_register(pcr, LDO_CONFIG2,
+ DV331812_VDD1, DV331812_VDD1);
+ rtsx_pci_write_register(pcr, LDO_DV18_CFG,
+ DV331812_MASK, DV331812_33);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8, 0);
+ break;
+ case OUTPUT_1V8:
+ rtsx_pci_write_register(pcr, LDO_CONFIG2,
+ DV331812_VDD1, DV331812_VDD1);
+ rtsx_pci_write_register(pcr, LDO_DV18_CFG,
+ DV331812_MASK, DV331812_17);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8,
+ SD_IO_USING_1V8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rtsx_pci_init_cmd(pcr);
+ rts5260_fill_driving(pcr, voltage);
+ return rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+}
+
+static void rts5260_stop_cmd(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
+ rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
+ rtsx_pci_write_register(pcr, RTS5260_DMA_RST_CTL_0,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST);
+ rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH);
+}
+
+static void rts5260_card_before_power_off(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rts5260_stop_cmd(pcr);
+ rts5260_switch_output_voltage(pcr, OUTPUT_3V3);
+
+ if (option->ocp_en)
+ rtsx_pci_disable_ocp(pcr);
+}
+
+static int rts5260_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ int err = 0;
+
+ rts5260_card_before_power_off(pcr);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1,
+ LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_OFF);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+ DV331812_POWERON, DV331812_POWEROFF);
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+
+ return err;
+}
+
+static void rts5260_init_ocp(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en) {
+ u8 mask, val;
+
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN);
+ rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
+ RTS5260_DVIO_OCP_EN |
+ RTS5260_DVIO_OCP_CL_EN,
+ RTS5260_DVIO_OCP_EN |
+ RTS5260_DVIO_OCP_CL_EN);
+
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ RTS5260_DVCC_OCP_THD_MASK,
+ option->sd_400mA_ocp_thd);
+
+ rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
+ RTS5260_DVIO_OCP_THD_MASK,
+ RTS5260_DVIO_OCP_THD_350);
+
+ rtsx_pci_write_register(pcr, RTS5260_DV331812_CFG,
+ RTS5260_DV331812_OCP_THD_MASK,
+ RTS5260_DV331812_OCP_THD_210);
+
+ mask = SD_OCP_GLITCH_MASK | SDVIO_OCP_GLITCH_MASK;
+ val = pcr->hw_param.ocp_glitch;
+ rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
+
+ rtsx_pci_enable_ocp(pcr);
+ } else {
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN, 0);
+ rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
+ RTS5260_DVIO_OCP_EN |
+ RTS5260_DVIO_OCP_CL_EN, 0);
+ }
+}
+
+static void rts5260_enable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 val = 0;
+
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
+
+ val = SD_OCP_INT_EN | SD_DETECT_EN;
+ val |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_DETECT_EN | DV3318_OCP_INT_EN,
+ DV3318_DETECT_EN | DV3318_OCP_INT_EN);
+}
+
+static void rts5260_disable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+
+ mask = SD_OCP_INT_EN | SD_DETECT_EN;
+ mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_DETECT_EN | DV3318_OCP_INT_EN, 0);
+
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
+ OC_POWER_DOWN);
+}
+
+int rts5260_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
+{
+ return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
+}
+
+int rts5260_get_ocpstat2(struct rtsx_pcr *pcr, u8 *val)
+{
+ return rtsx_pci_read_register(pcr, REG_DV3318_OCPSTAT, val);
+}
+
+void rts5260_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+ u8 val = 0;
+
+ mask = SD_OCP_INT_CLR | SD_OC_CLR;
+ mask |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
+ val = SD_OCP_INT_CLR | SD_OC_CLR;
+ val |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
+
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_OCP_INT_CLR | DV3318_OCP_CLR,
+ DV3318_OCP_INT_CLR | DV3318_OCP_CLR);
+ udelay(10);
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_OCP_INT_CLR | DV3318_OCP_CLR, 0);
+}
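+
+/*
+ * Note that the sequence above pulses the clear bits: the *_INT_CLR
+ * and *_OC_CLR bits are asserted, held for 10us, and then deasserted
+ * again rather than left set.
+ */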
+
+void rts5260_process_ocp(struct rtsx_pcr *pcr)
+{
+ if (!pcr->option.ocp_en)
+ return;
+
+ rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
+ rts5260_get_ocpstat2(pcr, &pcr->ocp_stat2);
+ if (pcr->card_exist & SD_EXIST)
+ rtsx_sd_power_off_card3v3(pcr);
+ else if (pcr->card_exist & MS_EXIST)
+ rtsx_ms_power_off_card3v3(pcr);
+
+ if (!(pcr->card_exist & MS_EXIST) && !(pcr->card_exist & SD_EXIST)) {
+ if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER |
+ SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
+ (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER)))
+ rtsx_pci_clear_ocpstat(pcr);
+ pcr->ocp_stat = 0;
+ pcr->ocp_stat2 = 0;
+ }
+
+ if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER |
+ SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
+ (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER))) {
+ if (pcr->card_exist & SD_EXIST)
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+ else if (pcr->card_exist & MS_EXIST)
+ rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
+ }
+}
+
+int rts5260_init_hw(struct rtsx_pcr *pcr)
+{
+ int err;
+
+ rtsx_pci_init_ocp(pcr);
+
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG1,
+ AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE);
+ /* Reset L1SUB Config */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG3, 0xFF, 0x00);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PM_CLK_FORCE_CTL,
+ CLK_PM_EN, CLK_PM_EN);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWD_SUSPEND_EN, 0xFF, 0xFF);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ PWR_GATE_EN, PWR_GATE_EN);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, REG_VREF,
+ PWD_SUSPND_EN, PWD_SUSPND_EN);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RBCTL,
+ U_AUTO_DMA_EN_MASK, U_AUTO_DMA_DISABLE);
+
+ if (pcr->flags & PCR_REVERSE_SOCKET)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0xB0);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0x80);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OBFF_CFG,
+ OBFF_EN_MASK, OBFF_DISABLE);
+
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
+{
+ int lss_l1_1, lss_l1_2;
+
+ lss_l1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN)
+ | rtsx_check_dev_flag(pcr, PM_L1_1_EN);
+ lss_l1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN)
+ | rtsx_check_dev_flag(pcr, PM_L1_2_EN);
+
+ if (lss_l1_2) {
+ pcr_dbg(pcr, "Set parameters for L1.2.");
+ rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
+ 0xFF, PCIE_L1_2_EN);
+ rtsx_pci_write_register(pcr, PWR_FE_CTL,
+ 0xFF, PCIE_L1_2_PD_FE_EN);
+ } else if (lss_l1_1) {
+ pcr_dbg(pcr, "Set parameters for L1.1.");
+ rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
+ 0xFF, PCIE_L1_1_EN);
+ rtsx_pci_write_register(pcr, PWR_FE_CTL,
+ 0xFF, PCIE_L1_1_PD_FE_EN);
+ } else {
+ pcr_dbg(pcr, "Set parameters for L1.");
+ rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
+ 0xFF, PCIE_L1_0_EN);
+ rtsx_pci_write_register(pcr, PWR_FE_CTL,
+ 0xFF, PCIE_L1_0_PD_FE_EN);
+ }
+
+ rtsx_pci_write_register(pcr, CFG_L1_0_PCIE_DPHY_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_PCIE_MAC_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_CRC_SD30_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_CRC_SD40_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_SYS_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ /* Option cut APHY */
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_0,
+ 0xFF, CFG_PCIE_APHY_OFF_0_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_1,
+ 0xFF, CFG_PCIE_APHY_OFF_1_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_2,
+ 0xFF, CFG_PCIE_APHY_OFF_2_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_3,
+ 0xFF, CFG_PCIE_APHY_OFF_3_DEFAULT);
+ /* CDR DEC */
+ rtsx_pci_write_register(pcr, PWC_CDR, 0xFF, PWC_CDR_DEFAULT);
+ /* PWM/PFM */
+ rtsx_pci_write_register(pcr, CFG_LP_FPWM_VALUE,
+ 0xFF, CFG_LP_FPWM_VALUE_DEFAULT);
+ /* No power saving workaround */
+ rtsx_pci_write_register(pcr, CFG_L1_0_CRC_MISC_RET_VALUE,
+ 0xFF, CFG_L1_0_CRC_MISC_RET_VALUE_DEFAULT);
+}
+
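+/*
+ * Read the ASPM/PM L1 substate enables from PCI config space, apply the
+ * power-saving settings, mirror the LTR enable bit from DEVCTL2, and
+ * decide whether CLKREQ# must be forced low.
+ */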
+static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u32 lval;
+
+ rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_5260, &lval);
+
+ if (lval & ASPM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+
+ if (lval & ASPM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (lval & PM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+
+ if (lval & PM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+
+ rts5260_pwr_saving_setting(pcr);
+
+ if (option->ltr_en) {
+ u16 val;
+
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+ option->ltr_enabled = true;
+ option->ltr_active = true;
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+ } else {
+ option->ltr_enabled = false;
+ }
+ }
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ option->force_clkreq_0 = false;
+ else
+ option->force_clkreq_0 = true;
+}
+
+static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ /* Set mcu_cnt to 7 to ensure data can be sampled properly */
+ rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07);
+ rtsx_pci_write_register(pcr, SSC_DIV_N_0, 0xFF, 0x5D);
+
+ rts5260_init_from_cfg(pcr);
+
+ /* Force no MDIO */
+ rtsx_pci_write_register(pcr, RTS5260_AUTOLOAD_CFG4,
+ 0xFF, RTS5260_MIMO_DISABLE);
+ /* Modify SDVCC tune default parameters */
+ rtsx_pci_write_register(pcr, LDO_VCC_CFG0,
+ RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33);
+
+ rtsx_pci_write_register(pcr, PCLK_CTL, PCLK_MODE_SEL, PCLK_MODE_SEL);
+
+ rts5260_init_hw(pcr);
+
+ /*
+ * If u_force_clkreq_0 is enabled, the CLKREQ# pin will be forced
+ * low and the clock is forcibly requested.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+ return 0;
+}
+
+void rts5260_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u8 val = 0;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+ if (enable)
+ val = pcr->aspm_en;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
+
+ if (!enable)
+ val = FORCE_ASPM_CTL0;
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ }
+
+ pcr->aspm_enabled = enable;
+}
+
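+/*
+ * Choose the L1OFF self-power-gate value from the link state and the
+ * enabled L1 substates, toggling the MBIAS2 bit on card presence when
+ * card-detect gating is enabled.
+ */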
+static void rts5260_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR);
+ int card_exist = (interrupt & SD_EXIST) | (interrupt & MS_EXIST);
+ int aspm_L1_1, aspm_L1_2;
+ u8 val = 0;
+
+ aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+ aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (active) {
+ /* run, latency: 60us */
+ if (aspm_L1_1)
+ val = option->ltr_l1off_snooze_sspwrgate;
+ } else {
+ /* l1off, latency: 300us */
+ if (aspm_L1_2)
+ val = option->ltr_l1off_sspwrgate;
+ }
+
+ if (aspm_L1_1 || aspm_L1_2) {
+ if (rtsx_check_dev_flag(pcr,
+ LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) {
+ if (card_exist)
+ val &= ~L1OFF_MBIAS2_EN_5250;
+ else
+ val |= L1OFF_MBIAS2_EN_5250;
+ }
+ }
+ rtsx_set_l1off_sub(pcr, val);
+}
+
+static const struct pcr_ops rts5260_pcr_ops = {
+ .fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
+ .turn_on_led = rts5260_turn_on_led,
+ .turn_off_led = rts5260_turn_off_led,
+ .extra_init_hw = rts5260_extra_init_hw,
+ .enable_auto_blink = rtsx_base_enable_auto_blink,
+ .disable_auto_blink = rtsx_base_disable_auto_blink,
+ .card_power_on = rts5260_card_power_on,
+ .card_power_off = rts5260_card_power_off,
+ .switch_output_voltage = rts5260_switch_output_voltage,
+ .force_power_down = rtsx_base_force_power_down,
+ .stop_cmd = rts5260_stop_cmd,
+ .set_aspm = rts5260_set_aspm,
+ .set_l1off_cfg_sub_d0 = rts5260_set_l1off_cfg_sub_d0,
+ .enable_ocp = rts5260_enable_ocp,
+ .disable_ocp = rts5260_disable_ocp,
+ .init_ocp = rts5260_init_ocp,
+ .process_ocp = rts5260_process_ocp,
+ .get_ocpstat = rts5260_get_ocpstat,
+ .clear_ocpstat = rts5260_clear_ocpstat,
+};
+
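+/*
+ * One-time RTS5260 parameter setup: capabilities, drive strengths, pull
+ * control tables, LTR/ASPM defaults and over-current thresholds.
+ */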
+void rts5260_init_params(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ struct rtsx_hw_param *hw_param = &pcr->hw_param;
+
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+ pcr->num_slots = 2;
+
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+ pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
+ pcr->ic_version = rts5260_get_ic_version(pcr);
+ pcr->sd_pull_ctl_enable_tbl = rts5260_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rts5260_sd_pull_ctl_disable_tbl;
+ pcr->ms_pull_ctl_enable_tbl = rts5260_ms_pull_ctl_enable_tbl;
+ pcr->ms_pull_ctl_disable_tbl = rts5260_ms_pull_ctl_disable_tbl;
+
+ pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
+
+ pcr->ops = &rts5260_pcr_ops;
+
+ option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+ | LTR_L1SS_PWR_GATE_EN);
+ option->ltr_en = true;
+
+ /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+ option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+ option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+ option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+ option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
+ option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+ option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
+ option->ltr_l1off_snooze_sspwrgate =
+ LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
+
+ option->ocp_en = 1;
+ if (option->ocp_en)
+ hw_param->interrupt_en |= SD_OC_INT_EN;
+ hw_param->ocp_glitch = SD_OCP_GLITCH_10M | SDVIO_OCP_GLITCH_800U;
+ option->sd_400mA_ocp_thd = RTS5260_DVCC_OCP_THD_550;
+ option->sd_800mA_ocp_thd = RTS5260_DVCC_OCP_THD_970;
+}
diff --git a/drivers/misc/cardreader/rts5260.h b/drivers/misc/cardreader/rts5260.h
new file mode 100644
index 0000000..53a1411
--- /dev/null
+++ b/drivers/misc/cardreader/rts5260.h
@@ -0,0 +1,45 @@
+#ifndef __RTS5260_H__
+#define __RTS5260_H__
+
+#define RTS5260_DVCC_CTRL 0xFF73
+#define RTS5260_DVCC_OCP_EN (0x01 << 7)
+#define RTS5260_DVCC_OCP_THD_MASK (0x07 << 4)
+#define RTS5260_DVCC_POWERON (0x01 << 3)
+#define RTS5260_DVCC_OCP_CL_EN (0x01 << 2)
+
+#define RTS5260_DVIO_CTRL 0xFF75
+#define RTS5260_DVIO_OCP_EN (0x01 << 7)
+#define RTS5260_DVIO_OCP_THD_MASK (0x07 << 4)
+#define RTS5260_DVIO_POWERON (0x01 << 3)
+#define RTS5260_DVIO_OCP_CL_EN (0x01 << 2)
+
+#define RTS5260_DV331812_CFG 0xFF71
+#define RTS5260_DV331812_OCP_EN (0x01 << 7)
+#define RTS5260_DV331812_OCP_THD_MASK (0x07 << 4)
+#define RTS5260_DV331812_POWERON (0x01 << 3)
+#define RTS5260_DV331812_SEL (0x01 << 2)
+#define RTS5260_DV331812_VDD1 (0x01 << 2)
+#define RTS5260_DV331812_VDD2 (0x00 << 2)
+
+#define RTS5260_DV331812_OCP_THD_120 (0x00 << 4)
+#define RTS5260_DV331812_OCP_THD_140 (0x01 << 4)
+#define RTS5260_DV331812_OCP_THD_160 (0x02 << 4)
+#define RTS5260_DV331812_OCP_THD_180 (0x03 << 4)
+#define RTS5260_DV331812_OCP_THD_210 (0x04 << 4)
+#define RTS5260_DV331812_OCP_THD_240 (0x05 << 4)
+#define RTS5260_DV331812_OCP_THD_270 (0x06 << 4)
+#define RTS5260_DV331812_OCP_THD_300 (0x07 << 4)
+
+#define RTS5260_DVIO_OCP_THD_250 (0x00 << 4)
+#define RTS5260_DVIO_OCP_THD_300 (0x01 << 4)
+#define RTS5260_DVIO_OCP_THD_350 (0x02 << 4)
+#define RTS5260_DVIO_OCP_THD_400 (0x03 << 4)
+#define RTS5260_DVIO_OCP_THD_450 (0x04 << 4)
+#define RTS5260_DVIO_OCP_THD_500 (0x05 << 4)
+#define RTS5260_DVIO_OCP_THD_550 (0x06 << 4)
+#define RTS5260_DVIO_OCP_THD_600 (0x07 << 4)
+
+#define RTS5260_DVCC_OCP_THD_550 (0x00 << 4)
+#define RTS5260_DVCC_OCP_THD_970 (0x05 << 4)
+
+#endif
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 590fb9a..fd09b09 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -29,7 +29,7 @@
#include <linux/idr.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include <linux/mmc/card.h>
#include <asm/unaligned.h>
@@ -62,6 +62,7 @@ static const struct pci_device_id rtsx_pci_ids[] = {
{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
@@ -334,6 +335,9 @@ EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
{
+ if (pcr->ops->stop_cmd)
+ return pcr->ops->stop_cmd(pcr);
+
rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
@@ -826,7 +830,7 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
return err;
/* Wait SSC clock stable */
- udelay(10);
+ udelay(SSC_CLOCK_STABLE_WAIT);
err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
if (err < 0)
return err;
@@ -963,6 +967,20 @@ static void rtsx_pci_card_detect(struct work_struct *work)
pcr->slots[RTSX_MS_CARD].p_dev);
}
+void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->process_ocp)
+ pcr->ops->process_ocp(pcr);
+}
+
+int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
+{
+ if (pcr->option.ocp_en)
+ rtsx_pci_process_ocp(pcr);
+
+ return 0;
+}
+
static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
{
struct rtsx_pcr *pcr = dev_id;
@@ -987,6 +1005,9 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
int_reg &= (pcr->bier | 0x7FFFFF);
+ if (int_reg & SD_OC_INT)
+ rtsx_pci_process_ocp_interrupt(pcr);
+
if (int_reg & SD_INT) {
if (int_reg & SD_EXIST) {
pcr->card_inserted |= SD_EXIST;
@@ -1119,6 +1140,102 @@ static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
}
#endif
+void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
+
+ if (pcr->ops->enable_ocp)
+ pcr->ops->enable_ocp(pcr);
+ else
+ rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+}
+
+void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
+
+ if (pcr->ops->disable_ocp)
+ pcr->ops->disable_ocp(pcr);
+ else
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+}
+
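+/*
+ * Set up over-current protection: defer to the chip-specific hook when
+ * present; otherwise program the generic OCP time, threshold and glitch
+ * filter, or power the OC block down when OCP is disabled.
+ */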
+void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->init_ocp) {
+ pcr->ops->init_ocp(pcr);
+ } else {
+ struct rtsx_cr_option *option = &(pcr->option);
+
+ if (option->ocp_en) {
+ u8 val = option->sd_400mA_ocp_thd;
+
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
+ rtsx_pci_write_register(pcr, REG_OCPPARA1,
+ SD_OCP_TIME_MASK, SD_OCP_TIME_800);
+ rtsx_pci_write_register(pcr, REG_OCPPARA2,
+ SD_OCP_THD_MASK, val);
+ rtsx_pci_write_register(pcr, REG_OCPGLITCH,
+ SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
+ rtsx_pci_enable_ocp(pcr);
+ } else {
+ /* OC power down */
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
+ OC_POWER_DOWN);
+ }
+ }
+}
+
+int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
+{
+ if (pcr->ops->get_ocpstat)
+ return pcr->ops->get_ocpstat(pcr, val);
+ else
+ return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
+}
+
+void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->clear_ocpstat) {
+ pcr->ops->clear_ocpstat(pcr);
+ } else {
+ u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
+ u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
+
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ }
+}
+
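+/*
+ * Power down the SD card rail after an over-current event: stop the
+ * card clocks, drop the output enable, cut card power and release the
+ * pull controls.
+ */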
+int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
+ MS_CLK_EN | SD40_CLK_EN, 0);
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+
+ rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
+
+ msleep(50);
+
+ rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
+
+ return 0;
+}
+
+int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
+ MS_CLK_EN | SD40_CLK_EN, 0);
+
+ rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
+
+ rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
+ rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
+
+ return 0;
+}
+
static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
int err;
@@ -1189,6 +1306,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
case PID_5250:
case PID_524A:
case PID_525A:
+ case PID_5260:
rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
break;
default:
@@ -1265,6 +1383,9 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
case 0x5286:
rtl8402_init_params(pcr);
break;
+ case 0x5260:
+ rts5260_init_params(pcr);
+ break;
}
pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
@@ -1543,6 +1664,9 @@ static void rtsx_pci_shutdown(struct pci_dev *pcidev)
rtsx_pci_power_off(pcr, HOST_ENTER_S1);
pci_disable_device(pcidev);
+ free_irq(pcr->irq, (void *)pcr);
+ if (pcr->msi_en)
+ pci_disable_msi(pcr->pci);
}
#else /* CONFIG_PM */
diff --git a/drivers/mfd/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index ec784e0..6ea1655 100644
--- a/drivers/mfd/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -22,7 +22,7 @@
#ifndef __RTSX_PCR_H
#define __RTSX_PCR_H
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#define MIN_DIV_N_PCR 80
#define MAX_DIV_N_PCR 208
@@ -44,6 +44,8 @@
#define ASPM_MASK_NEG 0xFC
#define MASK_8_BIT_DEF 0xFF
+#define SSC_CLOCK_STABLE_WAIT 130
+
int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val);
int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val);
@@ -57,6 +59,7 @@ void rts5249_init_params(struct rtsx_pcr *pcr);
void rts524a_init_params(struct rtsx_pcr *pcr);
void rts525a_init_params(struct rtsx_pcr *pcr);
void rtl8411b_init_params(struct rtsx_pcr *pcr);
+void rts5260_init_params(struct rtsx_pcr *pcr);
static inline u8 map_sd_drive(int idx)
{
@@ -99,5 +102,12 @@ do { \
int rtsx_gops_pm_reset(struct rtsx_pcr *pcr);
int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency);
int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val);
+void rtsx_pci_init_ocp(struct rtsx_pcr *pcr);
+void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr);
+void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr);
+int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val);
+void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr);
+int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr);
+int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr);
#endif
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
index 59d61b0..b97903f 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/misc/cardreader/rtsx_usb.c
@@ -23,7 +23,7 @@
#include <linux/usb.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
-#include <linux/mfd/rtsx_usb.h>
+#include <linux/rtsx_usb.h>
static int polling_pipe = 1;
module_param(polling_pipe, int, S_IRUGO | S_IWUSR);
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 305a7a4..4d63ac8 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -562,7 +562,7 @@ static ssize_t at24_eeprom_write_i2c(struct at24_data *at24, const char *buf,
static int at24_read(void *priv, unsigned int off, void *val, size_t count)
{
struct at24_data *at24 = priv;
- struct i2c_client *client;
+ struct device *dev = &at24->client[0]->dev;
char *buf = val;
int ret;
@@ -572,11 +572,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
if (off + count > at24->chip.byte_len)
return -EINVAL;
- client = at24_translate_offset(at24, &off);
-
- ret = pm_runtime_get_sync(&client->dev);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ pm_runtime_put_noidle(dev);
return ret;
}
@@ -592,7 +590,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
status = at24->read_func(at24, buf, off, count);
if (status < 0) {
mutex_unlock(&at24->lock);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(dev);
return status;
}
buf += status;
@@ -602,7 +600,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
mutex_unlock(&at24->lock);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(dev);
return 0;
}
@@ -610,7 +608,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
static int at24_write(void *priv, unsigned int off, void *val, size_t count)
{
struct at24_data *at24 = priv;
- struct i2c_client *client;
+ struct device *dev = &at24->client[0]->dev;
char *buf = val;
int ret;
@@ -620,11 +618,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
if (off + count > at24->chip.byte_len)
return -EINVAL;
- client = at24_translate_offset(at24, &off);
-
- ret = pm_runtime_get_sync(&client->dev);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0) {
- pm_runtime_put_noidle(&client->dev);
+ pm_runtime_put_noidle(dev);
return ret;
}
@@ -640,7 +636,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
status = at24->write_func(at24, buf, off, count);
if (status < 0) {
mutex_unlock(&at24->lock);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(dev);
return status;
}
buf += status;
@@ -650,7 +646,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
mutex_unlock(&at24->lock);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(dev);
return 0;
}
@@ -880,7 +876,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
at24->nvmem_config.reg_read = at24_read;
at24->nvmem_config.reg_write = at24_write;
at24->nvmem_config.priv = at24;
- at24->nvmem_config.stride = 4;
+ at24->nvmem_config.stride = 1;
at24->nvmem_config.word_size = 1;
at24->nvmem_config.size = chip.byte_len;
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index eda38cb..41f2a9f 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -32,7 +32,7 @@
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/miscdevice.h>
-#include <linux/pti.h>
+#include <linux/intel-pti.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index ccfa98a..20135a5 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -63,7 +63,13 @@ MODULE_ALIAS("mmc:block");
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
-#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+/*
+ * Set a 10 second timeout for polling the write request busy state. Note that
+ * the mmc core sets a 3 second timeout for SD cards, and SDHCI has long had a
+ * 10 second software timer to time out the whole request, so 10 seconds should
+ * be ample.
+ */
+#define MMC_BLK_TIMEOUT_MS (10 * 1000)
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
@@ -112,6 +118,7 @@ struct mmc_blk_data {
#define MMC_BLK_WRITE BIT(1)
#define MMC_BLK_DISCARD BIT(2)
#define MMC_BLK_SECDISCARD BIT(3)
+#define MMC_BLK_CQE_RECOVERY BIT(4)
/*
* Only set in main mmc_blk_data associated
@@ -189,7 +196,7 @@ static void mmc_blk_put(struct mmc_blk_data *md)
md->usage--;
if (md->usage == 0) {
int devidx = mmc_get_devidx(md->disk);
- blk_cleanup_queue(md->queue.queue);
+ blk_put_queue(md->queue.queue);
ida_simple_remove(&mmc_blk_ida, devidx);
put_disk(md->disk);
kfree(md);
@@ -921,14 +928,54 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
return 0;
}
+static unsigned int mmc_blk_clock_khz(struct mmc_host *host)
+{
+ if (host->actual_clock)
+ return host->actual_clock / 1000;
+
+ /* Clock may be subject to a divisor, fudge it by a factor of 2. */
+ if (host->ios.clock)
+ return host->ios.clock / 2000;
+
+ /* How can there be no clock? */
+ WARN_ON_ONCE(1);
+ return 100; /* 100 kHz is minimum possible value */
+}
+
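+/*
+ * Convert the card's data timeout (a nanosecond part plus a clock-cycle
+ * part) into milliseconds, scaling the cycle part by the current host
+ * clock rate.
+ */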
+static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
+ struct mmc_data *data)
+{
+ unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000);
+ unsigned int khz;
+
+ if (data->timeout_clks) {
+ khz = mmc_blk_clock_khz(host);
+ ms += DIV_ROUND_UP(data->timeout_clks, khz);
+ }
+
+ return ms;
+}
+
+static inline bool mmc_blk_in_tran_state(u32 status)
+{
+ /*
+ * Some cards mishandle the status bits, so make sure to check both the
+ * busy indication and the card state.
+ */
+ return status & R1_READY_FOR_DATA &&
+ (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
+}
+
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
- bool hw_busy_detect, struct request *req, bool *gen_err)
+ struct request *req, u32 *resp_errs)
{
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
int err = 0;
u32 status;
do {
+ bool done = time_after(jiffies, timeout);
+
err = __mmc_send_status(card, &status, 5);
if (err) {
pr_err("%s: error %d requesting status\n",
@@ -936,25 +983,18 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
return err;
}
- if (status & R1_ERROR) {
- pr_err("%s: %s: error sending status cmd, status %#x\n",
- req->rq_disk->disk_name, __func__, status);
- *gen_err = true;
- }
-
- /* We may rely on the host hw to handle busy detection.*/
- if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
- hw_busy_detect)
- break;
+ /* Accumulate any response error bits seen */
+ if (resp_errs)
+ *resp_errs |= status;
/*
* Timeout if the device never becomes ready for data and never
* leaves the program state.
*/
- if (time_after(jiffies, timeout)) {
- pr_err("%s: Card stuck in programming state! %s %s\n",
+ if (done) {
+ pr_err("%s: Card stuck in wrong state! %s %s status: %#x\n",
mmc_hostname(card->host),
- req->rq_disk->disk_name, __func__);
+ req->rq_disk->disk_name, __func__, status);
return -ETIMEDOUT;
}
@@ -963,229 +1003,11 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
* so make sure to check both the busy
* indication and the card state.
*/
- } while (!(status & R1_READY_FOR_DATA) ||
- (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+ } while (!mmc_blk_in_tran_state(status));
return err;
}
-static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
- struct request *req, bool *gen_err, u32 *stop_status)
-{
- struct mmc_host *host = card->host;
- struct mmc_command cmd = {};
- int err;
- bool use_r1b_resp = rq_data_dir(req) == WRITE;
-
- /*
- * Normally we use R1B responses for WRITE, but in cases where the host
- * has specified a max_busy_timeout we need to validate it. A failure
- * means we need to prevent the host from doing hw busy detection, which
- * is done by converting to a R1 response instead.
- */
- if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
- use_r1b_resp = false;
-
- cmd.opcode = MMC_STOP_TRANSMISSION;
- if (use_r1b_resp) {
- cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- cmd.busy_timeout = timeout_ms;
- } else {
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
- }
-
- err = mmc_wait_for_cmd(host, &cmd, 5);
- if (err)
- return err;
-
- *stop_status = cmd.resp[0];
-
- /* No need to check card status in case of READ. */
- if (rq_data_dir(req) == READ)
- return 0;
-
- if (!mmc_host_is_spi(host) &&
- (*stop_status & R1_ERROR)) {
- pr_err("%s: %s: general error sending stop command, resp %#x\n",
- req->rq_disk->disk_name, __func__, *stop_status);
- *gen_err = true;
- }
-
- return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
-}
-
-#define ERR_NOMEDIUM 3
-#define ERR_RETRY 2
-#define ERR_ABORT 1
-#define ERR_CONTINUE 0
-
-static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
- bool status_valid, u32 status)
-{
- switch (error) {
- case -EILSEQ:
- /* response crc error, retry the r/w cmd */
- pr_err("%s: %s sending %s command, card status %#x\n",
- req->rq_disk->disk_name, "response CRC error",
- name, status);
- return ERR_RETRY;
-
- case -ETIMEDOUT:
- pr_err("%s: %s sending %s command, card status %#x\n",
- req->rq_disk->disk_name, "timed out", name, status);
-
- /* If the status cmd initially failed, retry the r/w cmd */
- if (!status_valid) {
- pr_err("%s: status not valid, retrying timeout\n",
- req->rq_disk->disk_name);
- return ERR_RETRY;
- }
-
- /*
- * If it was a r/w cmd crc error, or illegal command
- * (eg, issued in wrong state) then retry - we should
- * have corrected the state problem above.
- */
- if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
- pr_err("%s: command error, retrying timeout\n",
- req->rq_disk->disk_name);
- return ERR_RETRY;
- }
-
- /* Otherwise abort the command */
- return ERR_ABORT;
-
- default:
- /* We don't understand the error code the driver gave us */
- pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
- req->rq_disk->disk_name, error, status);
- return ERR_ABORT;
- }
-}
-
-/*
- * Initial r/w and stop cmd error recovery.
- * We don't know whether the card received the r/w cmd or not, so try to
- * restore things back to a sane state. Essentially, we do this as follows:
- * - Obtain card status. If the first attempt to obtain card status fails,
- * the status word will reflect the failed status cmd, not the failed
- * r/w cmd. If we fail to obtain card status, it suggests we can no
- * longer communicate with the card.
- * - Check the card state. If the card received the cmd but there was a
- * transient problem with the response, it might still be in a data transfer
- * mode. Try to send it a stop command. If this fails, we can't recover.
- * - If the r/w cmd failed due to a response CRC error, it was probably
- * transient, so retry the cmd.
- * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
- * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
- * illegal cmd, retry.
- * Otherwise we don't understand what happened, so abort.
- */
-static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
- struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err)
-{
- bool prev_cmd_status_valid = true;
- u32 status, stop_status = 0;
- int err, retry;
-
- if (mmc_card_removed(card))
- return ERR_NOMEDIUM;
-
- /*
- * Try to get card status which indicates both the card state
- * and why there was no response. If the first attempt fails,
- * we can't be sure the returned status is for the r/w command.
- */
- for (retry = 2; retry >= 0; retry--) {
- err = __mmc_send_status(card, &status, 0);
- if (!err)
- break;
-
- /* Re-tune if needed */
- mmc_retune_recheck(card->host);
-
- prev_cmd_status_valid = false;
- pr_err("%s: error %d sending status command, %sing\n",
- req->rq_disk->disk_name, err, retry ? "retry" : "abort");
- }
-
- /* We couldn't get a response from the card. Give up. */
- if (err) {
- /* Check if the card is removed */
- if (mmc_detect_card_removed(card->host))
- return ERR_NOMEDIUM;
- return ERR_ABORT;
- }
-
- /* Flag ECC errors */
- if ((status & R1_CARD_ECC_FAILED) ||
- (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
- (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
- *ecc_err = true;
-
- /* Flag General errors */
- if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
- if ((status & R1_ERROR) ||
- (brq->stop.resp[0] & R1_ERROR)) {
- pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
- req->rq_disk->disk_name, __func__,
- brq->stop.resp[0], status);
- *gen_err = true;
- }
-
- /*
- * Check the current card state. If it is in some data transfer
- * mode, tell it to stop (and hopefully transition back to TRAN.)
- */
- if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
- R1_CURRENT_STATE(status) == R1_STATE_RCV) {
- err = send_stop(card,
- DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
- req, gen_err, &stop_status);
- if (err) {
- pr_err("%s: error %d sending stop command\n",
- req->rq_disk->disk_name, err);
- /*
- * If the stop cmd also timed out, the card is probably
- * not present, so abort. Other errors are bad news too.
- */
- return ERR_ABORT;
- }
-
- if (stop_status & R1_CARD_ECC_FAILED)
- *ecc_err = true;
- }
-
- /* Check for set block count errors */
- if (brq->sbc.error)
- return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
- prev_cmd_status_valid, status);
-
- /* Check for r/w command errors */
- if (brq->cmd.error)
- return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
- prev_cmd_status_valid, status);
-
- /* Data errors */
- if (!brq->stop.error)
- return ERR_CONTINUE;
-
- /* Now for stop errors. These aren't fatal to the transfer. */
- pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq->stop.error,
- brq->cmd.resp[0], status);
-
- /*
- * Subsitute in our own stop status as this will give the error
- * state which happened during the execution of the r/w command.
- */
- if (stop_status) {
- brq->stop.resp[0] = stop_status;
- brq->stop.error = 0;
- }
- return ERR_CONTINUE;
-}
-
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
int type)
{
@@ -1281,7 +1103,7 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
break;
}
mq_rq->drv_op_result = ret;
- blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
+ blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
@@ -1324,7 +1146,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
else
mmc_blk_reset_success(md, type);
fail:
- blk_end_request(req, status, blk_rq_bytes(req));
+ blk_mq_end_request(req, status);
}
static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
@@ -1394,7 +1216,7 @@ out_retry:
if (!err)
mmc_blk_reset_success(md, type);
out:
- blk_end_request(req, status, blk_rq_bytes(req));
+ blk_mq_end_request(req, status);
}
static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
@@ -1404,7 +1226,7 @@ static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
int ret = 0;
ret = mmc_flush_cache(card);
- blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
+ blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
/*
@@ -1430,15 +1252,18 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
}
}
-#define CMD_ERRORS \
- (R1_OUT_OF_RANGE | /* Command argument out of range */ \
- R1_ADDRESS_ERROR | /* Misaligned address */ \
+#define CMD_ERRORS_EXCL_OOR \
+ (R1_ADDRESS_ERROR | /* Misaligned address */ \
R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
R1_WP_VIOLATION | /* Tried to write to protected block */ \
R1_CARD_ECC_FAILED | /* Card ECC failed */ \
R1_CC_ERROR | /* Card controller error */ \
R1_ERROR) /* General/unknown error */
+#define CMD_ERRORS \
+ (CMD_ERRORS_EXCL_OOR | \
+ R1_OUT_OF_RANGE) /* Command argument out of range */
+
static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{
u32 val;
@@ -1481,116 +1306,6 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
}
}
-static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
- struct mmc_async_req *areq)
-{
- struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
- areq);
- struct mmc_blk_request *brq = &mq_mrq->brq;
- struct request *req = mmc_queue_req_to_req(mq_mrq);
- int need_retune = card->host->need_retune;
- bool ecc_err = false;
- bool gen_err = false;
-
- /*
- * sbc.error indicates a problem with the set block count
- * command. No data will have been transferred.
- *
- * cmd.error indicates a problem with the r/w command. No
- * data will have been transferred.
- *
- * stop.error indicates a problem with the stop command. Data
- * may have been transferred, or may still be transferring.
- */
-
- mmc_blk_eval_resp_error(brq);
-
- if (brq->sbc.error || brq->cmd.error ||
- brq->stop.error || brq->data.error) {
- switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
- case ERR_RETRY:
- return MMC_BLK_RETRY;
- case ERR_ABORT:
- return MMC_BLK_ABORT;
- case ERR_NOMEDIUM:
- return MMC_BLK_NOMEDIUM;
- case ERR_CONTINUE:
- break;
- }
- }
-
- /*
- * Check for errors relating to the execution of the
- * initial command - such as address errors. No data
- * has been transferred.
- */
- if (brq->cmd.resp[0] & CMD_ERRORS) {
- pr_err("%s: r/w command failed, status = %#x\n",
- req->rq_disk->disk_name, brq->cmd.resp[0]);
- return MMC_BLK_ABORT;
- }
-
- /*
- * Everything else is either success, or a data error of some
- * kind. If it was a write, we may have transitioned to
- * program mode, which we have to wait for it to complete.
- */
- if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
- int err;
-
- /* Check stop command response */
- if (brq->stop.resp[0] & R1_ERROR) {
- pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
- req->rq_disk->disk_name, __func__,
- brq->stop.resp[0]);
- gen_err = true;
- }
-
- err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
- &gen_err);
- if (err)
- return MMC_BLK_CMD_ERR;
- }
-
- /* if general error occurs, retry the write operation. */
- if (gen_err) {
- pr_warn("%s: retrying write for general error\n",
- req->rq_disk->disk_name);
- return MMC_BLK_RETRY;
- }
-
- /* Some errors (ECC) are flagged on the next commmand, so check stop, too */
- if (brq->data.error || brq->stop.error) {
- if (need_retune && !brq->retune_retry_done) {
- pr_debug("%s: retrying because a re-tune was needed\n",
- req->rq_disk->disk_name);
- brq->retune_retry_done = 1;
- return MMC_BLK_RETRY;
- }
- pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq->data.error ?: brq->stop.error,
- (unsigned)blk_rq_pos(req),
- (unsigned)blk_rq_sectors(req),
- brq->cmd.resp[0], brq->stop.resp[0]);
-
- if (rq_data_dir(req) == READ) {
- if (ecc_err)
- return MMC_BLK_ECC_ERR;
- return MMC_BLK_DATA_ERR;
- } else {
- return MMC_BLK_CMD_ERR;
- }
- }
-
- if (!brq->data.bytes_xfered)
- return MMC_BLK_RETRY;
-
- if (blk_rq_bytes(req) != brq->data.bytes_xfered)
- return MMC_BLK_PARTIAL;
-
- return MMC_BLK_SUCCESS;
-}
-
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
int disable_multi, bool *do_rel_wr_p,
bool *do_data_tag_p)
@@ -1706,8 +1421,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.sg_len = i;
}
- mqrq->areq.mrq = &brq->mrq;
-
if (do_rel_wr_p)
*do_rel_wr_p = do_rel_wr;
@@ -1715,6 +1428,138 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
*do_data_tag_p = do_data_tag;
}
+#define MMC_CQE_RETRIES 2
+
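+/*
+ * Complete a CQE request: requeue it on error while retries remain,
+ * account partial data transfers, then drop the in-flight count and
+ * release the card once the queue drains.
+ */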
+static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct request_queue *q = req->q;
+ struct mmc_host *host = mq->card->host;
+ unsigned long flags;
+ bool put_card;
+ int err;
+
+ mmc_cqe_post_req(host, mrq);
+
+ if (mrq->cmd && mrq->cmd->error)
+ err = mrq->cmd->error;
+ else if (mrq->data && mrq->data->error)
+ err = mrq->data->error;
+ else
+ err = 0;
+
+ if (err) {
+ if (mqrq->retries++ < MMC_CQE_RETRIES)
+ blk_mq_requeue_request(req, true);
+ else
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ } else if (mrq->data) {
+ if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
+ blk_mq_requeue_request(req, true);
+ else
+ __blk_mq_end_request(req, BLK_STS_OK);
+ } else {
+ blk_mq_end_request(req, BLK_STS_OK);
+ }
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+
+ put_card = (mmc_tot_in_flight(mq) == 0);
+
+ mmc_cqe_check_busy(mq);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (!mq->cqe_busy)
+ blk_mq_run_hw_queues(q, true);
+
+ if (put_card)
+ mmc_put_card(mq->card, &mq->ctx);
+}
+
+void mmc_blk_cqe_recovery(struct mmc_queue *mq)
+{
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ int err;
+
+ pr_debug("%s: CQE recovery start\n", mmc_hostname(host));
+
+ err = mmc_cqe_recovery(host);
+ if (err)
+ mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
+ else
+ mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
+
+ pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
+}
+
+static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
+{
+ struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+ brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+
+ /*
+ * Block layer timeouts race with completions which means the normal
+ * completion path cannot be used during recovery.
+ */
+ if (mq->in_recovery)
+ mmc_blk_cqe_complete_rq(mq, req);
+ else
+ blk_mq_complete_request(req);
+}
+
+static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ mrq->done = mmc_blk_cqe_req_done;
+ mrq->recovery_notifier = mmc_cqe_recovery_notifier;
+
+ return mmc_cqe_start_req(host, mrq);
+}
+
+static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq,
+ struct request *req)
+{
+ struct mmc_blk_request *brq = &mqrq->brq;
+
+ memset(brq, 0, sizeof(*brq));
+
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.tag = req->tag;
+
+ return &brq->mrq;
+}
+
+static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);
+
+ mrq->cmd->opcode = MMC_SWITCH;
+ mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (EXT_CSD_FLUSH_CACHE << 16) |
+ (1 << 8) |
+ EXT_CSD_CMD_SET_NORMAL;
+ mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B;
+
+ return mmc_blk_cqe_start_req(mq->card->host, mrq);
+}
+
+static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+
+ mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);
+
+ return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
+}
+
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
int disable_multi,
@@ -1779,318 +1624,637 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
brq->mrq.sbc = &brq->sbc;
}
+}
+
+#define MMC_MAX_RETRIES 5
+#define MMC_DATA_RETRIES 2
+#define MMC_NO_RETRIES (MMC_MAX_RETRIES + 1)
- mqrq->areq.err_check = mmc_blk_err_check;
+static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout)
+{
+ struct mmc_command cmd = {
+ .opcode = MMC_STOP_TRANSMISSION,
+ .flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC,
+ /* Some hosts wait for busy anyway, so provide a busy timeout */
+ .busy_timeout = timeout,
+ };
+
+ return mmc_wait_for_cmd(card->host, &cmd, 5);
}
-static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
- struct mmc_blk_request *brq, struct request *req,
- bool old_req_pending)
+static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
{
- bool req_pending;
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_blk_request *brq = &mqrq->brq;
+ unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data);
+ int err;
- /*
- * If this is an SD card and we're writing, we can first
- * mark the known good sectors as ok.
- *
- * If the card is not SD, we can still ok written sectors
- * as reported by the controller (which might be less than
- * the real number of written sectors, but never more).
- */
- if (mmc_card_sd(card)) {
- u32 blocks;
+ mmc_retune_hold_now(card->host);
+
+ mmc_blk_send_stop(card, timeout);
+
+ err = card_busy_detect(card, timeout, req, NULL);
+
+ mmc_retune_release(card->host);
+
+ return err;
+}
+
+#define MMC_READ_SINGLE_RETRIES 2
+
+/* Single sector read during recovery */
+static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ blk_status_t error = BLK_STS_OK;
+ int retries = 0;
+
+ do {
+ u32 status;
int err;
- err = mmc_sd_num_wr_blocks(card, &blocks);
+ mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
+
+ mmc_wait_for_req(host, mrq);
+
+ err = mmc_send_status(card, &status);
if (err)
- req_pending = old_req_pending;
+ goto error_exit;
+
+ if (!mmc_host_is_spi(host) &&
+ !mmc_blk_in_tran_state(status)) {
+ err = mmc_blk_fix_state(card, req);
+ if (err)
+ goto error_exit;
+ }
+
+ if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES)
+ continue;
+
+ retries = 0;
+
+ if (mrq->cmd->error ||
+ mrq->data->error ||
+ (!mmc_host_is_spi(host) &&
+ (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS)))
+ error = BLK_STS_IOERR;
else
- req_pending = blk_end_request(req, BLK_STS_OK, blocks << 9);
- } else {
- req_pending = blk_end_request(req, BLK_STS_OK, brq->data.bytes_xfered);
- }
- return req_pending;
+ error = BLK_STS_OK;
+
+ } while (blk_update_request(req, error, 512));
+
+ return;
+
+error_exit:
+ mrq->data->bytes_xfered = 0;
+ blk_update_request(req, BLK_STS_IOERR, 512);
+ /* Let it try the remaining request again */
+ if (mqrq->retries > MMC_MAX_RETRIES - 1)
+ mqrq->retries = MMC_MAX_RETRIES - 1;
}
-static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
- struct request *req,
- struct mmc_queue_req *mqrq)
+static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq)
{
- if (mmc_card_removed(card))
- req->rq_flags |= RQF_QUIET;
- while (blk_end_request(req, BLK_STS_IOERR, blk_rq_cur_bytes(req)));
- mq->qcnt--;
+ return !!brq->mrq.sbc;
}
-/**
- * mmc_blk_rw_try_restart() - tries to restart the current async request
- * @mq: the queue with the card and host to restart
- * @req: a new request that want to be started after the current one
+static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq)
+{
+ return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR;
+}
+
+/*
+ * Check for errors the host controller driver might not have seen such as
+ * response mode errors or invalid card state.
+ */
+static bool mmc_blk_status_error(struct request *req, u32 status)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct mmc_queue *mq = req->q->queuedata;
+ u32 stop_err_bits;
+
+ if (mmc_host_is_spi(mq->card->host))
+ return false;
+
+ stop_err_bits = mmc_blk_stop_err_bits(brq);
+
+ return brq->cmd.resp[0] & CMD_ERRORS ||
+ brq->stop.resp[0] & stop_err_bits ||
+ status & stop_err_bits ||
+ (rq_data_dir(req) == WRITE && !mmc_blk_in_tran_state(status));
+}
+
+static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
+{
+ return !brq->sbc.error && !brq->cmd.error &&
+ !(brq->cmd.resp[0] & CMD_ERRORS);
+}
+
+/*
+ * Requests are completed by mmc_blk_mq_complete_rq(), which applies a
+ * simple policy:
+ * 1. A request that has transferred at least some data is considered
+ * successful and will be requeued if there is remaining data to
+ * transfer.
+ * 2. Otherwise the number of retries is incremented and the request
+ * will be requeued if there are remaining retries.
+ * 3. Otherwise the request will be errored out.
+ * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and
+ * mqrq->retries. So there are only 4 possible actions here:
+ * 1. do not accept the bytes_xfered value i.e. set it to zero
+ * 2. change mqrq->retries to determine the number of retries
+ * 3. try to reset the card
+ * 4. read one sector at a time
*/
-static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req,
- struct mmc_queue_req *mqrq)
+static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
{
- if (!req)
+ int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct mmc_blk_data *md = mq->blkdata;
+ struct mmc_card *card = mq->card;
+ u32 status;
+ u32 blocks;
+ int err;
+
+ /*
+ * There may be errors the host driver has not seen. Set the number of
+ * bytes transferred to zero in that case.
+ */
+ err = __mmc_send_status(card, &status, 0);
+ if (err || mmc_blk_status_error(req, status))
+ brq->data.bytes_xfered = 0;
+
+ mmc_retune_release(card->host);
+
+ /*
+ * Try again to get the status. This also provides an opportunity for
+ * re-tuning.
+ */
+ if (err)
+ err = __mmc_send_status(card, &status, 0);
+
+ /*
+ * Nothing more to do after the number of bytes transferred has been
+ * updated and there is no card.
+ */
+ if (err && mmc_detect_card_removed(card->host))
return;
+ /* Try to get back to "tran" state */
+ if (!mmc_host_is_spi(mq->card->host) &&
+ (err || !mmc_blk_in_tran_state(status)))
+ err = mmc_blk_fix_state(mq->card, req);
+
/*
- * If the card was removed, just cancel everything and return.
+ * Special case for SD cards where the card might record the number of
+ * blocks written.
*/
- if (mmc_card_removed(mq->card)) {
- req->rq_flags |= RQF_QUIET;
- blk_end_request_all(req, BLK_STS_IOERR);
- mq->qcnt--; /* FIXME: just set to 0? */
+ if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) &&
+ rq_data_dir(req) == WRITE) {
+ if (mmc_sd_num_wr_blocks(card, &blocks))
+ brq->data.bytes_xfered = 0;
+ else
+ brq->data.bytes_xfered = blocks << 9;
+ }
+
+ /* Reset if the card is in a bad state */
+ if (!mmc_host_is_spi(mq->card->host) &&
+ err && mmc_blk_reset(md, card->host, type)) {
+ pr_err("%s: recovery failed!\n", req->rq_disk->disk_name);
+ mqrq->retries = MMC_NO_RETRIES;
+ return;
+ }
+
+ /*
+ * If anything was done, just return and if there is anything remaining
+ * on the request it will get requeued.
+ */
+ if (brq->data.bytes_xfered)
+ return;
+
+ /* Reset before last retry */
+ if (mqrq->retries + 1 == MMC_MAX_RETRIES)
+ mmc_blk_reset(md, card->host, type);
+
+ /* Command errors fail fast, so use all MMC_MAX_RETRIES */
+ if (brq->sbc.error || brq->cmd.error)
+ return;
+
+ /* Reduce the remaining retries for data errors */
+ if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) {
+ mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES;
+ return;
+ }
+
+ /* FIXME: Missing single sector read for large sector size */
+ if (!mmc_large_sector(card) && rq_data_dir(req) == READ &&
+ brq->data.blocks > 1) {
+ /* Read one sector at a time */
+ mmc_blk_read_single(mq, req);
return;
}
- /* Else proceed and try to restart the current async request */
- mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
- mmc_start_areq(mq->card->host, &mqrq->areq, NULL);
}
-static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
+static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
{
- struct mmc_blk_data *md = mq->blkdata;
- struct mmc_card *card = md->queue.card;
- struct mmc_blk_request *brq;
- int disable_multi = 0, retry = 0, type, retune_retry_done = 0;
- enum mmc_blk_status status;
- struct mmc_queue_req *mqrq_cur = NULL;
- struct mmc_queue_req *mq_rq;
- struct request *old_req;
- struct mmc_async_req *new_areq;
- struct mmc_async_req *old_areq;
- bool req_pending = true;
+ mmc_blk_eval_resp_error(brq);
+
+ return brq->sbc.error || brq->cmd.error || brq->stop.error ||
+ brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
+}
+
+static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ u32 status = 0;
+ int err;
+
+ if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
+ return 0;
+
+ err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, req, &status);
+
+ /*
+ * Do not assume data transferred correctly if there are any error bits
+ * set.
+ */
+ if (status & mmc_blk_stop_err_bits(&mqrq->brq)) {
+ mqrq->brq.data.bytes_xfered = 0;
+ err = err ? err : -EIO;
+ }
+
+ /* Copy the exception bit so it will be seen later on */
+ if (mmc_card_mmc(card) && status & R1_EXCEPTION_EVENT)
+ mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT;
+
+ return err;
+}
- if (new_req) {
- mqrq_cur = req_to_mmc_queue_req(new_req);
- mq->qcnt++;
+static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq,
+ struct request *req)
+{
+ int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+
+ mmc_blk_reset_success(mq->blkdata, type);
+}
+
+static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ unsigned int nr_bytes = mqrq->brq.data.bytes_xfered;
+
+ if (nr_bytes) {
+ if (blk_update_request(req, BLK_STS_OK, nr_bytes))
+ blk_mq_requeue_request(req, true);
+ else
+ __blk_mq_end_request(req, BLK_STS_OK);
+ } else if (!blk_rq_bytes(req)) {
+ __blk_mq_end_request(req, BLK_STS_IOERR);
+ } else if (mqrq->retries++ < MMC_MAX_RETRIES) {
+ blk_mq_requeue_request(req, true);
+ } else {
+ if (mmc_card_removed(mq->card))
+ req->rq_flags |= RQF_QUIET;
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ }
+}
+
+static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq,
+ struct mmc_queue_req *mqrq)
+{
+ return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) &&
+ (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT ||
+ mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT);
+}
+
+static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
+ struct mmc_queue_req *mqrq)
+{
+ if (mmc_blk_urgent_bkops_needed(mq, mqrq))
+ mmc_start_bkops(mq->card, true);
+}
+
+void mmc_blk_mq_complete(struct request *req)
+{
+ struct mmc_queue *mq = req->q->queuedata;
+
+ if (mq->use_cqe)
+ mmc_blk_cqe_complete_rq(mq, req);
+ else
+ mmc_blk_mq_complete_rq(mq, req);
+}
+
+static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_host *host = mq->card->host;
+
+ if (mmc_blk_rq_error(&mqrq->brq) ||
+ mmc_blk_card_busy(mq->card, req)) {
+ mmc_blk_mq_rw_recovery(mq, req);
+ } else {
+ mmc_blk_rw_reset_success(mq, req);
+ mmc_retune_release(host);
+ }
+
+ mmc_blk_urgent_bkops(mq, mqrq);
+}
+
+static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
+{
+ struct request_queue *q = req->q;
+ unsigned long flags;
+ bool put_card;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+
+ put_card = (mmc_tot_in_flight(mq) == 0);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (put_card)
+ mmc_put_card(mq->card, &mq->ctx);
+}
+
+static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_host *host = mq->card->host;
+
+ mmc_post_req(host, mrq, 0);
+
+ /*
+ * Block layer timeouts race with completions which means the normal
+ * completion path cannot be used during recovery.
+ */
+ if (mq->in_recovery)
+ mmc_blk_mq_complete_rq(mq, req);
+ else
+ blk_mq_complete_request(req);
+
+ mmc_blk_mq_dec_in_flight(mq, req);
+}
+
+void mmc_blk_mq_recovery(struct mmc_queue *mq)
+{
+ struct request *req = mq->recovery_req;
+ struct mmc_host *host = mq->card->host;
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+
+ mq->recovery_req = NULL;
+ mq->rw_wait = false;
+
+ if (mmc_blk_rq_error(&mqrq->brq)) {
+ mmc_retune_hold_now(host);
+ mmc_blk_mq_rw_recovery(mq, req);
}
- if (!mq->qcnt)
+ mmc_blk_urgent_bkops(mq, mqrq);
+
+ mmc_blk_mq_post_req(mq, req);
+}
+
+static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
+ struct request **prev_req)
+{
+ if (mmc_host_done_complete(mq->card->host))
return;
- do {
- if (new_req) {
- /*
- * When 4KB native sector is enabled, only 8 blocks
- * multiple read or write is allowed
- */
- if (mmc_large_sector(card) &&
- !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
- pr_err("%s: Transfer size is not 4KB sector size aligned\n",
- new_req->rq_disk->disk_name);
- mmc_blk_rw_cmd_abort(mq, card, new_req, mqrq_cur);
- return;
- }
+ mutex_lock(&mq->complete_lock);
- mmc_blk_rw_rq_prep(mqrq_cur, card, 0, mq);
- new_areq = &mqrq_cur->areq;
- } else
- new_areq = NULL;
+ if (!mq->complete_req)
+ goto out_unlock;
- old_areq = mmc_start_areq(card->host, new_areq, &status);
- if (!old_areq) {
- /*
- * We have just put the first request into the pipeline
- * and there is nothing more to do until it is
- * complete.
- */
- return;
- }
+ mmc_blk_mq_poll_completion(mq, mq->complete_req);
+
+ if (prev_req)
+ *prev_req = mq->complete_req;
+ else
+ mmc_blk_mq_post_req(mq, mq->complete_req);
+
+ mq->complete_req = NULL;
+
+out_unlock:
+ mutex_unlock(&mq->complete_lock);
+}
+
+void mmc_blk_mq_complete_work(struct work_struct *work)
+{
+ struct mmc_queue *mq = container_of(work, struct mmc_queue,
+ complete_work);
+
+ mmc_blk_mq_complete_prev_req(mq, NULL);
+}
+
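+/*
+ * Request-done callback: hand completion to the waiting issuer or the
+ * completion work on hosts that cannot complete here, divert to the
+ * recovery path on errors or urgent background operations, otherwise
+ * finish the request directly.
+ */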
+static void mmc_blk_mq_req_done(struct mmc_request *mrq)
+{
+ struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+ brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ struct mmc_host *host = mq->card->host;
+ unsigned long flags;
+
+ if (!mmc_host_done_complete(host)) {
+ bool waiting;
/*
- * An asynchronous request has been completed and we proceed
- * to handle the result of it.
+ * We cannot complete the request in this context, so record
+ * that there is a request to complete, and that a following
+ * request does not need to wait (although it does need to
+ * complete complete_req first).
*/
- mq_rq = container_of(old_areq, struct mmc_queue_req, areq);
- brq = &mq_rq->brq;
- old_req = mmc_queue_req_to_req(mq_rq);
- type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
-
- switch (status) {
- case MMC_BLK_SUCCESS:
- case MMC_BLK_PARTIAL:
- /*
- * A block was successfully transferred.
- */
- mmc_blk_reset_success(md, type);
+ spin_lock_irqsave(q->queue_lock, flags);
+ mq->complete_req = req;
+ mq->rw_wait = false;
+ waiting = mq->waiting;
+ spin_unlock_irqrestore(q->queue_lock, flags);
- req_pending = blk_end_request(old_req, BLK_STS_OK,
- brq->data.bytes_xfered);
- /*
- * If the blk_end_request function returns non-zero even
- * though all data has been transferred and no errors
- * were returned by the host controller, it's a bug.
- */
- if (status == MMC_BLK_SUCCESS && req_pending) {
- pr_err("%s BUG rq_tot %d d_xfer %d\n",
- __func__, blk_rq_bytes(old_req),
- brq->data.bytes_xfered);
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- return;
- }
- break;
- case MMC_BLK_CMD_ERR:
- req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
- if (mmc_blk_reset(md, card->host, type)) {
- if (req_pending)
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- else
- mq->qcnt--;
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- if (!req_pending) {
- mq->qcnt--;
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- break;
- case MMC_BLK_RETRY:
- retune_retry_done = brq->retune_retry_done;
- if (retry++ < 5)
- break;
- /* Fall through */
- case MMC_BLK_ABORT:
- if (!mmc_blk_reset(md, card->host, type))
- break;
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- case MMC_BLK_DATA_ERR: {
- int err;
-
- err = mmc_blk_reset(md, card->host, type);
- if (!err)
- break;
- if (err == -ENODEV) {
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- /* Fall through */
- }
- case MMC_BLK_ECC_ERR:
- if (brq->data.blocks > 1) {
- /* Redo read one sector at a time */
- pr_warn("%s: retrying using single block read\n",
- old_req->rq_disk->disk_name);
- disable_multi = 1;
- break;
- }
- /*
- * After an error, we redo I/O one sector at a
- * time, so we only reach here after trying to
- * read a single sector.
- */
- req_pending = blk_end_request(old_req, BLK_STS_IOERR,
- brq->data.blksz);
- if (!req_pending) {
- mq->qcnt--;
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- break;
- case MMC_BLK_NOMEDIUM:
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- default:
- pr_err("%s: Unhandled return value (%d)",
- old_req->rq_disk->disk_name, status);
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
+ /*
+ * If 'waiting', the waiting task will complete this request;
+ * otherwise queue a work item to do it. Note that complete_work
+ * may still race with the dispatch of a following request.
+ */
+ if (waiting)
+ wake_up(&mq->wait);
+ else
+ kblockd_schedule_work(&mq->complete_work);
- if (req_pending) {
- /*
- * In case of an incomplete request
- * prepare it again and resend.
- */
- mmc_blk_rw_rq_prep(mq_rq, card,
- disable_multi, mq);
- mmc_start_areq(card->host,
- &mq_rq->areq, NULL);
- mq_rq->brq.retune_retry_done = retune_retry_done;
- }
- } while (req_pending);
+ return;
+ }
+
+ /* Take the recovery path for errors or urgent background operations */
+ if (mmc_blk_rq_error(&mqrq->brq) ||
+ mmc_blk_urgent_bkops_needed(mq, mqrq)) {
+ spin_lock_irqsave(q->queue_lock, flags);
+ mq->recovery_needed = true;
+ mq->recovery_req = req;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ wake_up(&mq->wait);
+ schedule_work(&mq->recovery_work);
+ return;
+ }
+
+ mmc_blk_rw_reset_success(mq, req);
- mq->qcnt--;
+ mq->rw_wait = false;
+ wake_up(&mq->wait);
+
+ mmc_blk_mq_post_req(mq, req);
}
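
The function above is the heart of the non-CQE completion path: when the host cannot complete requests from its done() context, the finished request is only recorded, and actual completion is handed to whichever party can run it, the sleeping dispatcher or a work item. A minimal sketch of that "wake the waiter or punt to a worker" idiom, using only primitives that already appear in the patch; every demo_* identifier is hypothetical:

	/* Sketch only, not driver code. */
	struct demo_ctx {
		spinlock_t lock;
		wait_queue_head_t wait;
		struct work_struct work;	/* runs deferred completions */
		void *pending;			/* request recorded for later */
		bool busy;			/* a request is in flight */
		bool waiting;			/* dispatcher asleep in wait_event()? */
	};

	static void demo_done(struct demo_ctx *c, void *req)
	{
		unsigned long flags;
		bool waiting;

		spin_lock_irqsave(&c->lock, flags);
		c->pending = req;		/* record, do not complete here */
		c->busy = false;
		waiting = c->waiting;
		spin_unlock_irqrestore(&c->lock, flags);

		if (waiting)
			wake_up(&c->wait);	 /* waiter completes 'pending' */
		else
			schedule_work(&c->work); /* worker completes 'pending' */
	}
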
-void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
+{
+ struct request_queue *q = mq->queue;
+ unsigned long flags;
+ bool done;
+
+ /*
+ * Wait while there is another request in progress, but not if recovery
+ * is needed. Also indicate whether there is a request waiting to start.
+ */
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (mq->recovery_needed) {
+ *err = -EBUSY;
+ done = true;
+ } else {
+ done = !mq->rw_wait;
+ }
+ mq->waiting = !done;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return done;
+}
+
+static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req)
+{
+ int err = 0;
+
+ wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err));
+
+ /* Always complete the previous request if there is one */
+ mmc_blk_mq_complete_prev_req(mq, prev_req);
+
+ return err;
+}
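
The sleep side of the same handshake: the condition helper runs with the lock held and records whether a dispatcher is about to sleep, so the done() path above knows whether to wake it or to schedule the work item instead. The same shape in isolation, continuing the hypothetical demo_ctx from the previous sketch:

	static bool demo_cond(struct demo_ctx *c)
	{
		unsigned long flags;
		bool done;

		spin_lock_irqsave(&c->lock, flags);
		done = !c->busy;	/* previous request finished? */
		c->waiting = !done;	/* tell demo_done() someone sleeps */
		spin_unlock_irqrestore(&c->lock, flags);

		return done;
	}

	static void demo_wait(struct demo_ctx *c)
	{
		/* wait_event() re-evaluates demo_cond() on every wake-up */
		wait_event(c->wait, demo_cond(c));
	}
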
+
+static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_host *host = mq->card->host;
+ struct request *prev_req = NULL;
+ int err = 0;
+
+ mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
+
+ mqrq->brq.mrq.done = mmc_blk_mq_req_done;
+
+ mmc_pre_req(host, &mqrq->brq.mrq);
+
+ err = mmc_blk_rw_wait(mq, &prev_req);
+ if (err)
+ goto out_post_req;
+
+ mq->rw_wait = true;
+
+ err = mmc_start_request(host, &mqrq->brq.mrq);
+
+ if (prev_req)
+ mmc_blk_mq_post_req(mq, prev_req);
+
+ if (err)
+ mq->rw_wait = false;
+
+ /* Release re-tuning here where there is no synchronization required */
+ if (err || mmc_host_done_complete(host))
+ mmc_retune_release(host);
+
+out_post_req:
+ if (err)
+ mmc_post_req(host, &mqrq->brq.mrq, err);
+
+ return err;
+}
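
The ordering in mmc_blk_mq_issue_rw_rq() is what preserves the old mmc_start_areq() pipelining: the next transfer is prepared while the previous one still runs, and the previous request is only post-processed once the new one has been started. Reduced to its skeleton (every step names a call made above):

	/*
	 * 1. mmc_blk_rw_rq_prep() + mmc_pre_req()
	 *	- build and DMA-map the NEW request while the previous one
	 *	  is still running on the host.
	 * 2. mmc_blk_rw_wait()
	 *	- sleep until the previous request finishes; complete it via
	 *	  mmc_blk_mq_complete_prev_req().
	 * 3. mmc_start_request()
	 *	- fire the NEW request at the host.
	 * 4. mmc_blk_mq_post_req(prev_req)
	 *	- only now unmap the PREVIOUS request, overlapping cleanup
	 *	  with the new transfer.
	 */
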
+
+static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
+{
+ if (mq->use_cqe)
+ return host->cqe_ops->cqe_wait_for_idle(host);
+
+ return mmc_blk_rw_wait(mq, NULL);
+}
+
+enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
{
- int ret;
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
-
- if (req && !mq->qcnt)
- /* claim host only for the first request */
- mmc_get_card(card, NULL);
+ struct mmc_host *host = card->host;
+ int ret;
ret = mmc_blk_part_switch(card, md->part_type);
- if (ret) {
- if (req) {
- blk_end_request_all(req, BLK_STS_IOERR);
- }
- goto out;
- }
+ if (ret)
+ return MMC_REQ_FAILED_TO_START;
- if (req) {
+ switch (mmc_issue_type(mq, req)) {
+ case MMC_ISSUE_SYNC:
+ ret = mmc_blk_wait_for_idle(mq, host);
+ if (ret)
+ return MMC_REQ_BUSY;
switch (req_op(req)) {
case REQ_OP_DRV_IN:
case REQ_OP_DRV_OUT:
- /*
- * Complete ongoing async transfer before issuing
- * ioctl()s
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_drv_op(mq, req);
break;
case REQ_OP_DISCARD:
- /*
- * Complete ongoing async transfer before issuing
- * discard.
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_discard_rq(mq, req);
break;
case REQ_OP_SECURE_ERASE:
- /*
- * Complete ongoing async transfer before issuing
- * secure erase.
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_secdiscard_rq(mq, req);
break;
case REQ_OP_FLUSH:
- /*
- * Complete ongoing async transfer before issuing
- * flush.
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_flush(mq, req);
break;
default:
- /* Normal request, just issue it */
- mmc_blk_issue_rw_rq(mq, req);
- card->host->context_info.is_waiting_last_req = false;
+ WARN_ON_ONCE(1);
+ return MMC_REQ_FAILED_TO_START;
+ }
+ return MMC_REQ_FINISHED;
+ case MMC_ISSUE_DCMD:
+ case MMC_ISSUE_ASYNC:
+ switch (req_op(req)) {
+ case REQ_OP_FLUSH:
+ ret = mmc_blk_cqe_issue_flush(mq, req);
break;
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ if (mq->use_cqe)
+ ret = mmc_blk_cqe_issue_rw_rq(mq, req);
+ else
+ ret = mmc_blk_mq_issue_rw_rq(mq, req);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
}
- } else {
- /* No request, flushing the pipeline with NULL */
- mmc_blk_issue_rw_rq(mq, NULL);
- card->host->context_info.is_waiting_last_req = false;
+ if (!ret)
+ return MMC_REQ_STARTED;
+ return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START;
+ default:
+ WARN_ON_ONCE(1);
+ return MMC_REQ_FAILED_TO_START;
}
-
-out:
- if (!mq->qcnt)
- mmc_put_card(card, NULL);
}
static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -2156,6 +2320,18 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
md->queue.blkdata = md;
+ /*
+ * Keep an extra reference to the queue so that we can shut down the
+ * queue (i.e. call blk_cleanup_queue()) while there are still
+ * references to the 'md'. The corresponding blk_put_queue() is in
+ * mmc_blk_put().
+ */
+ if (!blk_get_queue(md->queue.queue)) {
+ mmc_cleanup_queue(&md->queue);
+ ret = -ENODEV;
+ goto err_putdisk;
+ }
+
md->disk->major = MMC_BLOCK_MAJOR;
md->disk->first_minor = devidx * perdev_minors;
md->disk->fops = &mmc_bdops;
@@ -2471,10 +2647,6 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
* from being accepted.
*/
card = md->queue.card;
- spin_lock_irq(md->queue.queue->queue_lock);
- queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);
- spin_unlock_irq(md->queue.queue->queue_lock);
- blk_set_queue_dying(md->queue.queue);
mmc_cleanup_queue(&md->queue);
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
@@ -2623,6 +2795,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
if (n != EXT_CSD_STR_LEN) {
err = -EINVAL;
+ kfree(ext_csd);
goto out_free;
}
diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h
index 5946636..31153f6 100644
--- a/drivers/mmc/core/block.h
+++ b/drivers/mmc/core/block.h
@@ -5,6 +5,16 @@
struct mmc_queue;
struct request;
-void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req);
+void mmc_blk_cqe_recovery(struct mmc_queue *mq);
+
+enum mmc_issued;
+
+enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req);
+void mmc_blk_mq_complete(struct request *req);
+void mmc_blk_mq_recovery(struct mmc_queue *mq);
+
+struct work_struct;
+
+void mmc_blk_mq_complete_work(struct work_struct *work);
#endif
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 7586ff2..fc92c6c 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -351,8 +351,6 @@ int mmc_add_card(struct mmc_card *card)
#ifdef CONFIG_DEBUG_FS
mmc_add_card_debugfs(card);
#endif
- mmc_init_context_info(card->host);
-
card->dev.of_node = mmc_of_find_child_device(card->host, 0);
device_enable_async_suspend(&card->dev);
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index f06cd91..79a5b98 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -75,9 +75,11 @@ struct mmc_fixup {
#define EXT_CSD_REV_ANY (-1u)
#define CID_MANFID_SANDISK 0x2
+#define CID_MANFID_ATP 0x9
#define CID_MANFID_TOSHIBA 0x11
#define CID_MANFID_MICRON 0x13
#define CID_MANFID_SAMSUNG 0x15
+#define CID_MANFID_APACER 0x27
#define CID_MANFID_KINGSTON 0x70
#define CID_MANFID_HYNIX 0x90
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 1f0f44f..c0ba6d8 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -341,6 +341,8 @@ int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
+ init_completion(&mrq->cmd_completion);
+
mmc_retune_hold(host);
if (mmc_card_removed(host->card))
@@ -361,20 +363,6 @@ int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
}
EXPORT_SYMBOL(mmc_start_request);
-/*
- * mmc_wait_data_done() - done callback for data request
- * @mrq: done data request
- *
- * Wakes up mmc context, passed as a callback to host controller driver
- */
-static void mmc_wait_data_done(struct mmc_request *mrq)
-{
- struct mmc_context_info *context_info = &mrq->host->context_info;
-
- context_info->is_done_rcv = true;
- wake_up_interruptible(&context_info->wait);
-}
-
static void mmc_wait_done(struct mmc_request *mrq)
{
complete(&mrq->completion);
@@ -392,37 +380,6 @@ static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
wait_for_completion(&ongoing_mrq->cmd_completion);
}
-/*
- *__mmc_start_data_req() - starts data request
- * @host: MMC host to start the request
- * @mrq: data request to start
- *
- * Sets the done callback to be called when request is completed by the card.
- * Starts data mmc request execution
- * If an ongoing transfer is already in progress, wait for the command line
- * to become available before sending another command.
- */
-static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
-{
- int err;
-
- mmc_wait_ongoing_tfr_cmd(host);
-
- mrq->done = mmc_wait_data_done;
- mrq->host = host;
-
- init_completion(&mrq->cmd_completion);
-
- err = mmc_start_request(host, mrq);
- if (err) {
- mrq->cmd->error = err;
- mmc_complete_cmd(mrq);
- mmc_wait_data_done(mrq);
- }
-
- return err;
-}
-
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
@@ -432,8 +389,6 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
init_completion(&mrq->completion);
mrq->done = mmc_wait_done;
- init_completion(&mrq->cmd_completion);
-
err = mmc_start_request(host, mrq);
if (err) {
mrq->cmd->error = err;
@@ -650,164 +605,11 @@ EXPORT_SYMBOL(mmc_cqe_recovery);
*/
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
- if (host->areq)
- return host->context_info.is_done_rcv;
- else
- return completion_done(&mrq->completion);
+ return completion_done(&mrq->completion);
}
EXPORT_SYMBOL(mmc_is_req_done);
/**
- * mmc_pre_req - Prepare for a new request
- * @host: MMC host to prepare command
- * @mrq: MMC request to prepare for
- *
- * mmc_pre_req() is called prior to mmc_start_req() to let
- * host prepare for the new request. Preparation of a request may be
- * performed while another request is running on the host.
- */
-static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
-{
- if (host->ops->pre_req)
- host->ops->pre_req(host, mrq);
-}
-
-/**
- * mmc_post_req - Post process a completed request
- * @host: MMC host to post process command
- * @mrq: MMC request to post process for
- * @err: Error; if non-zero, clean up any resources allocated in pre_req
- *
- * Let the host post process a completed request. Post processing of
- * a request may be performed while another request is running.
- */
-static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
- int err)
-{
- if (host->ops->post_req)
- host->ops->post_req(host, mrq, err);
-}
-
-/**
- * mmc_finalize_areq() - finalize an asynchronous request
- * @host: MMC host to finalize any ongoing request on
- *
- * Returns the status of the ongoing asynchronous request, but
- * MMC_BLK_SUCCESS if no request was going on.
- */
-static enum mmc_blk_status mmc_finalize_areq(struct mmc_host *host)
-{
- struct mmc_context_info *context_info = &host->context_info;
- enum mmc_blk_status status;
-
- if (!host->areq)
- return MMC_BLK_SUCCESS;
-
- while (1) {
- wait_event_interruptible(context_info->wait,
- (context_info->is_done_rcv ||
- context_info->is_new_req));
-
- if (context_info->is_done_rcv) {
- struct mmc_command *cmd;
-
- context_info->is_done_rcv = false;
- cmd = host->areq->mrq->cmd;
-
- if (!cmd->error || !cmd->retries ||
- mmc_card_removed(host->card)) {
- status = host->areq->err_check(host->card,
- host->areq);
- break; /* return status */
- } else {
- mmc_retune_recheck(host);
- pr_info("%s: req failed (CMD%u): %d, retrying...\n",
- mmc_hostname(host),
- cmd->opcode, cmd->error);
- cmd->retries--;
- cmd->error = 0;
- __mmc_start_request(host, host->areq->mrq);
- continue; /* wait for done/new event again */
- }
- }
-
- return MMC_BLK_NEW_REQUEST;
- }
-
- mmc_retune_release(host);
-
- /*
- * Check BKOPS urgency for each R1 response
- */
- if (host->card && mmc_card_mmc(host->card) &&
- ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
- (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
- (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
- mmc_start_bkops(host->card, true);
- }
-
- return status;
-}
-
-/**
- * mmc_start_areq - start an asynchronous request
- * @host: MMC host to start command
- * @areq: asynchronous request to start
- * @ret_stat: out parameter for status
- *
- * Start a new MMC custom command request for a host.
- * If there is an ongoing async request, wait for completion
- * of that request and start the new one and return.
- * Does not wait for the new request to complete.
- *
- * Returns the completed request, NULL in case of none completed.
- * Wait for an ongoing request (previously started) to complete and
- * return the completed request. If there is no ongoing request, NULL
- * is returned without waiting. NULL is not an error condition.
- */
-struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
- struct mmc_async_req *areq,
- enum mmc_blk_status *ret_stat)
-{
- enum mmc_blk_status status;
- int start_err = 0;
- struct mmc_async_req *previous = host->areq;
-
- /* Prepare a new request */
- if (areq)
- mmc_pre_req(host, areq->mrq);
-
- /* Finalize previous request */
- status = mmc_finalize_areq(host);
- if (ret_stat)
- *ret_stat = status;
-
- /* The previous request is still going on... */
- if (status == MMC_BLK_NEW_REQUEST)
- return NULL;
-
- /* Fine so far, start the new request! */
- if (status == MMC_BLK_SUCCESS && areq)
- start_err = __mmc_start_data_req(host, areq->mrq);
-
- /* Postprocess the old request at this point */
- if (host->areq)
- mmc_post_req(host, host->areq->mrq, 0);
-
- /* Cancel a prepared request if it was not started. */
- if ((status != MMC_BLK_SUCCESS || start_err) && areq)
- mmc_post_req(host, areq->mrq, -EINVAL);
-
- if (status != MMC_BLK_SUCCESS)
- host->areq = NULL;
- else
- host->areq = areq;
-
- return previous;
-}
-EXPORT_SYMBOL(mmc_start_areq);
-
-/**
* mmc_wait_for_req - start a request and wait for completion
* @host: MMC host to start command
* @mrq: MMC request to start
@@ -2959,6 +2761,14 @@ static int mmc_pm_notify(struct notifier_block *notify_block,
if (!err)
break;
+ if (!mmc_card_is_removable(host)) {
+ dev_warn(mmc_dev(host),
+ "pre_suspend failed for non-removable host: "
+ "%d\n", err);
+ /* Avoid removing non-removable hosts */
+ break;
+ }
+
/* Calling bus_ops->remove() with a claimed host can deadlock */
host->bus_ops->remove(host);
mmc_claim_host(host);
@@ -2994,22 +2804,6 @@ void mmc_unregister_pm_notifier(struct mmc_host *host)
}
#endif
-/**
- * mmc_init_context_info() - init synchronization context
- * @host: mmc host
- *
- * Init struct context_info needed to implement asynchronous
- * request mechanism, used by mmc core, host driver and mmc requests
- * supplier.
- */
-void mmc_init_context_info(struct mmc_host *host)
-{
- host->context_info.is_new_req = false;
- host->context_info.is_done_rcv = false;
- host->context_info.is_waiting_last_req = false;
- init_waitqueue_head(&host->context_info.wait);
-}
-
static int __init mmc_init(void)
{
int ret;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 71e6c6d..d6303d6 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -62,12 +62,10 @@ void mmc_set_initial_state(struct mmc_host *host);
static inline void mmc_delay(unsigned int ms)
{
- if (ms < 1000 / HZ) {
- cond_resched();
- mdelay(ms);
- } else {
+ if (ms <= 20)
+ usleep_range(ms * 1000, ms * 1250);
+ else
msleep(ms);
- }
}
void mmc_rescan(struct work_struct *work);
@@ -91,8 +89,6 @@ void mmc_remove_host_debugfs(struct mmc_host *host);
void mmc_add_card_debugfs(struct mmc_card *card);
void mmc_remove_card_debugfs(struct mmc_card *card);
-void mmc_init_context_info(struct mmc_host *host);
-
int mmc_execute_tuning(struct mmc_card *card);
int mmc_hs200_to_hs400(struct mmc_card *card);
int mmc_hs400_to_hs200(struct mmc_card *card);
@@ -110,12 +106,6 @@ bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq);
-struct mmc_async_req;
-
-struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
- struct mmc_async_req *areq,
- enum mmc_blk_status *ret_stat);
-
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
unsigned int arg);
int mmc_can_erase(struct mmc_card *card);
@@ -152,4 +142,35 @@ int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq);
void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq);
int mmc_cqe_recovery(struct mmc_host *host);
+/**
+ * mmc_pre_req - Prepare for a new request
+ * @host: MMC host to prepare command
+ * @mrq: MMC request to prepare for
+ *
+ * mmc_pre_req() is called prior to mmc_start_req() to let
+ * host prepare for the new request. Preparation of a request may be
+ * performed while another request is running on the host.
+ */
+static inline void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ if (host->ops->pre_req)
+ host->ops->pre_req(host, mrq);
+}
+
+/**
+ * mmc_post_req - Post process a completed request
+ * @host: MMC host to post process command
+ * @mrq: MMC request to post process for
+ * @err: Error; if non-zero, clean up any resources allocated in pre_req
+ *
+ * Let the host post process a completed request. Post processing of
+ * a request may be performed while another request is running.
+ */
+static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
+ int err)
+{
+ if (host->ops->post_req)
+ host->ops->post_req(host, mrq, err);
+}
+
#endif
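
The two inlines above merely forward to optional host ops, but moving them into core.h is what lets the block driver keep double-buffering DMA mappings without mmc_start_areq(). As an illustration of what a host typically hooks up, a hypothetical implementation might map and unmap the scatterlist here (demo_* names are invented; error handling elided):

	static void demo_pre_req(struct mmc_host *host, struct mmc_request *mrq)
	{
		if (mrq->data)
			dma_map_sg(mmc_dev(host), mrq->data->sg,
				   mrq->data->sg_len, mmc_get_dma_dir(mrq->data));
	}

	static void demo_post_req(struct mmc_host *host, struct mmc_request *mrq,
				  int err)
	{
		if (mrq->data)
			dma_unmap_sg(mmc_dev(host), mrq->data->sg,
				     mrq->data->sg_len, mmc_get_dma_dir(mrq->data));
	}
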
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index fb689a1..06ec19b 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -41,6 +41,11 @@ static inline int mmc_host_cmd23(struct mmc_host *host)
return host->caps & MMC_CAP_CMD23;
}
+static inline bool mmc_host_done_complete(struct mmc_host *host)
+{
+ return host->caps & MMC_CAP_DONE_COMPLETE;
+}
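
MMC_CAP_DONE_COMPLETE lets a host opt out of the deferred-completion dance in mmc_blk_mq_req_done(): if its done() callback already runs where blk_mq_complete_request() is safe, the core completes directly. A hypothetical probe fragment setting the capability (demo_* identifiers, including the private struct, are illustrative):

	struct demo_priv { int dummy; };	/* hypothetical driver state */

	static int demo_probe(struct platform_device *pdev)
	{
		struct mmc_host *mmc;

		mmc = mmc_alloc_host(sizeof(struct demo_priv), &pdev->dev);
		if (!mmc)
			return -ENOMEM;

		/* done() runs where blk_mq_complete_request() is safe */
		mmc->caps |= MMC_CAP_DONE_COMPLETE;

		return mmc_add_host(mmc);
	}
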
+
static inline int mmc_boot_partition_access(struct mmc_host *host)
{
return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
@@ -74,6 +79,5 @@ static inline bool mmc_card_hs400es(struct mmc_card *card)
return card->host->ios.enhanced_strobe;
}
-
#endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index d209fb4..208a762 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1290,7 +1290,7 @@ out_err:
static void mmc_select_driver_type(struct mmc_card *card)
{
- int card_drv_type, drive_strength, drv_type;
+ int card_drv_type, drive_strength, drv_type = 0;
int fixed_drv_type = card->host->fixed_drv_type;
card_drv_type = card->ext_csd.raw_driver_strength |
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index 4788698..ef18dae 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -101,7 +101,7 @@ struct mmc_test_transfer_result {
struct list_head link;
unsigned int count;
unsigned int sectors;
- struct timespec ts;
+ struct timespec64 ts;
unsigned int rate;
unsigned int iops;
};
@@ -171,11 +171,6 @@ struct mmc_test_multiple_rw {
enum mmc_test_prep_media prepare;
};
-struct mmc_test_async_req {
- struct mmc_async_req areq;
- struct mmc_test_card *test;
-};
-
/*******************************************************************/
/* General helper functions */
/*******************************************************************/
@@ -515,14 +510,11 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
/*
* Calculate transfer rate in bytes per second.
*/
-static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
+static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts)
{
uint64_t ns;
- ns = ts->tv_sec;
- ns *= 1000000000;
- ns += ts->tv_nsec;
-
+ ns = timespec64_to_ns(ts);
bytes *= 1000000000;
while (ns > UINT_MAX) {
@@ -542,7 +534,7 @@ static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
* Save transfer results for future usage
*/
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
- unsigned int count, unsigned int sectors, struct timespec ts,
+ unsigned int count, unsigned int sectors, struct timespec64 ts,
unsigned int rate, unsigned int iops)
{
struct mmc_test_transfer_result *tr;
@@ -567,21 +559,21 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
* Print the transfer rate.
*/
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
- struct timespec *ts1, struct timespec *ts2)
+ struct timespec64 *ts1, struct timespec64 *ts2)
{
unsigned int rate, iops, sectors = bytes >> 9;
- struct timespec ts;
+ struct timespec64 ts;
- ts = timespec_sub(*ts2, *ts1);
+ ts = timespec64_sub(*ts2, *ts1);
rate = mmc_test_rate(bytes, &ts);
iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
- pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
+ pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u "
"seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
mmc_hostname(test->card->host), sectors, sectors >> 1,
- (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
- (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
+ (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec,
+ (u32)ts.tv_nsec, rate / 1000, rate / 1024,
iops / 100, iops % 100);
mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
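
The mmc_test changes in this hunk and the ones below are a straight y2038 conversion: getnstimeofday()/struct timespec become ktime_get_ts64()/struct timespec64, which is 64-bit safe and monotonic, so better suited to interval measurement anyway. The pattern in isolation (the demo_* workload is hypothetical):

	static void demo_do_transfer(void);	/* hypothetical workload */

	static void demo_time_transfer(void)
	{
		struct timespec64 ts1, ts2, delta;

		ktime_get_ts64(&ts1);
		demo_do_transfer();
		ktime_get_ts64(&ts2);

		delta = timespec64_sub(ts2, ts1);
		pr_info("took %llu.%09u s (%lld ns)\n",
			(u64)delta.tv_sec, (u32)delta.tv_nsec,
			timespec64_to_ns(&delta));
	}
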
@@ -591,24 +583,24 @@ static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
* Print the average transfer rate.
*/
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
- unsigned int count, struct timespec *ts1,
- struct timespec *ts2)
+ unsigned int count, struct timespec64 *ts1,
+ struct timespec64 *ts2)
{
unsigned int rate, iops, sectors = bytes >> 9;
uint64_t tot = bytes * count;
- struct timespec ts;
+ struct timespec64 ts;
- ts = timespec_sub(*ts2, *ts1);
+ ts = timespec64_sub(*ts2, *ts1);
rate = mmc_test_rate(tot, &ts);
iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
- "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
+ "%llu.%09u seconds (%u kB/s, %u KiB/s, "
"%u.%02u IOPS, sg_len %d)\n",
mmc_hostname(test->card->host), count, sectors, count,
sectors >> 1, (sectors & 1 ? ".5" : ""),
- (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
+ (u64)ts.tv_sec, (u32)ts.tv_nsec,
rate / 1000, rate / 1024, iops / 100, iops % 100,
test->area.sg_len);
@@ -741,30 +733,6 @@ static int mmc_test_check_result(struct mmc_test_card *test,
return ret;
}
-static enum mmc_blk_status mmc_test_check_result_async(struct mmc_card *card,
- struct mmc_async_req *areq)
-{
- struct mmc_test_async_req *test_async =
- container_of(areq, struct mmc_test_async_req, areq);
- int ret;
-
- mmc_test_wait_busy(test_async->test);
-
- /*
- * FIXME: this would earlier just cast a regular error code,
- * either of the kernel type -ERRORCODE or the local test framework
- * RESULT_* errorcode, into an enum mmc_blk_status and return as
- * result check. Instead, convert it to some reasonable type by just
- * returning either MMC_BLK_SUCCESS or MMC_BLK_CMD_ERR.
- * If possible, a reasonable error code should be returned.
- */
- ret = mmc_test_check_result(test_async->test, areq->mrq);
- if (ret)
- return MMC_BLK_CMD_ERR;
-
- return MMC_BLK_SUCCESS;
-}
-
/*
* Checks that a "short transfer" behaved as expected
*/
@@ -831,6 +799,45 @@ static struct mmc_test_req *mmc_test_req_alloc(void)
return rq;
}
+static void mmc_test_wait_done(struct mmc_request *mrq)
+{
+ complete(&mrq->completion);
+}
+
+static int mmc_test_start_areq(struct mmc_test_card *test,
+ struct mmc_request *mrq,
+ struct mmc_request *prev_mrq)
+{
+ struct mmc_host *host = test->card->host;
+ int err = 0;
+
+ if (mrq) {
+ init_completion(&mrq->completion);
+ mrq->done = mmc_test_wait_done;
+ mmc_pre_req(host, mrq);
+ }
+
+ if (prev_mrq) {
+ wait_for_completion(&prev_mrq->completion);
+ err = mmc_test_wait_busy(test);
+ if (!err)
+ err = mmc_test_check_result(test, prev_mrq);
+ }
+
+ if (!err && mrq) {
+ err = mmc_start_request(host, mrq);
+ if (err)
+ mmc_retune_release(host);
+ }
+
+ if (prev_mrq)
+ mmc_post_req(host, prev_mrq, 0);
+
+ if (err && mrq)
+ mmc_post_req(host, mrq, err);
+
+ return err;
+}
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
struct scatterlist *sg, unsigned sg_len,
@@ -838,17 +845,10 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
unsigned blksz, int write, int count)
{
struct mmc_test_req *rq1, *rq2;
- struct mmc_test_async_req test_areq[2];
- struct mmc_async_req *done_areq;
- struct mmc_async_req *cur_areq = &test_areq[0].areq;
- struct mmc_async_req *other_areq = &test_areq[1].areq;
- enum mmc_blk_status status;
+ struct mmc_request *mrq, *prev_mrq;
int i;
int ret = RESULT_OK;
- test_areq[0].test = test;
- test_areq[1].test = test;
-
rq1 = mmc_test_req_alloc();
rq2 = mmc_test_req_alloc();
if (!rq1 || !rq2) {
@@ -856,33 +856,25 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
goto err;
}
- cur_areq->mrq = &rq1->mrq;
- cur_areq->err_check = mmc_test_check_result_async;
- other_areq->mrq = &rq2->mrq;
- other_areq->err_check = mmc_test_check_result_async;
+ mrq = &rq1->mrq;
+ prev_mrq = NULL;
for (i = 0; i < count; i++) {
- mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
- blocks, blksz, write);
- done_areq = mmc_start_areq(test->card->host, cur_areq, &status);
-
- if (status != MMC_BLK_SUCCESS || (!done_areq && i > 0)) {
- ret = RESULT_FAIL;
+ mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq));
+ mmc_test_prepare_mrq(test, mrq, sg, sg_len, dev_addr, blocks,
+ blksz, write);
+ ret = mmc_test_start_areq(test, mrq, prev_mrq);
+ if (ret)
goto err;
- }
- if (done_areq)
- mmc_test_req_reset(container_of(done_areq->mrq,
- struct mmc_test_req, mrq));
+ if (!prev_mrq)
+ prev_mrq = &rq2->mrq;
- swap(cur_areq, other_areq);
+ swap(mrq, prev_mrq);
dev_addr += blocks;
}
- done_areq = mmc_start_areq(test->card->host, NULL, &status);
- if (status != MMC_BLK_SUCCESS)
- ret = RESULT_FAIL;
-
+ ret = mmc_test_start_areq(test, NULL, prev_mrq);
err:
kfree(rq1);
kfree(rq2);
@@ -1449,7 +1441,7 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
int max_scatter, int timed, int count,
bool nonblock, int min_sg_len)
{
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret = 0;
int i;
struct mmc_test_area *t = &test->area;
@@ -1475,7 +1467,7 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
return ret;
if (timed)
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
if (nonblock)
ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
dev_addr, t->blocks, 512, write, count);
@@ -1489,7 +1481,7 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
return ret;
if (timed)
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
if (timed)
mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
@@ -1747,7 +1739,7 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
struct mmc_test_area *t = &test->area;
unsigned long sz;
unsigned int dev_addr;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
if (!mmc_can_trim(test->card))
@@ -1758,19 +1750,19 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
for (sz = 512; sz < t->max_sz; sz <<= 1) {
dev_addr = t->dev_addr + (sz >> 9);
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
if (ret)
return ret;
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_rate(test, sz, &ts1, &ts2);
}
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
if (ret)
return ret;
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_rate(test, sz, &ts1, &ts2);
return 0;
}
@@ -1779,19 +1771,19 @@ static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
cnt = t->max_sz / sz;
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
if (ret)
return ret;
dev_addr += (sz >> 9);
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
return 0;
}
@@ -1818,7 +1810,7 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
ret = mmc_test_area_erase(test);
@@ -1826,14 +1818,14 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
return ret;
cnt = t->max_sz / sz;
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
if (ret)
return ret;
dev_addr += (sz >> 9);
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
return 0;
}
@@ -1864,7 +1856,7 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
struct mmc_test_area *t = &test->area;
unsigned long sz;
unsigned int dev_addr, i, cnt;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
if (!mmc_can_trim(test->card))
@@ -1882,7 +1874,7 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
return ret;
cnt = t->max_sz / sz;
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_erase(test->card, dev_addr, sz >> 9,
MMC_TRIM_ARG);
@@ -1890,7 +1882,7 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
return ret;
dev_addr += (sz >> 9);
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
}
return 0;
@@ -1912,7 +1904,7 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
{
unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
unsigned int ssz;
- struct timespec ts1, ts2, ts;
+ struct timespec64 ts1, ts2, ts;
int ret;
ssz = sz >> 9;
@@ -1921,10 +1913,10 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
range1 = rnd_addr / test->card->pref_erase;
range2 = range1 / ssz;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (cnt = 0; cnt < UINT_MAX; cnt++) {
- getnstimeofday(&ts2);
- ts = timespec_sub(ts2, ts1);
+ ktime_get_ts64(&ts2);
+ ts = timespec64_sub(ts2, ts1);
if (ts.tv_sec >= 10)
break;
ea = mmc_test_rnd_num(range1);
@@ -1998,7 +1990,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
{
struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt, sz, ssz;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
sz = t->max_tfr;
@@ -2025,7 +2017,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
cnt = tot_sz / sz;
dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, write,
max_scatter, 0);
@@ -2033,7 +2025,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
return ret;
dev_addr += ssz;
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
@@ -2328,10 +2320,17 @@ static int mmc_test_reset(struct mmc_test_card *test)
int err;
err = mmc_hw_reset(host);
- if (!err)
+ if (!err) {
+ /*
+ * Reset will re-enable the card's command queue, but tests
+ * expect it to be disabled.
+ */
+ if (card->ext_csd.cmdq_en)
+ mmc_cmdq_disable(card);
return RESULT_OK;
- else if (err == -EOPNOTSUPP)
+ } else if (err == -EOPNOTSUPP) {
return RESULT_UNSUP_HOST;
+ }
return RESULT_FAIL;
}
@@ -2356,11 +2355,9 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
struct mmc_test_req *rq = mmc_test_req_alloc();
struct mmc_host *host = test->card->host;
struct mmc_test_area *t = &test->area;
- struct mmc_test_async_req test_areq = { .test = test };
struct mmc_request *mrq;
unsigned long timeout;
bool expired = false;
- enum mmc_blk_status blkstat = MMC_BLK_SUCCESS;
int ret = 0, cmd_ret;
u32 status = 0;
int count = 0;
@@ -2373,9 +2370,6 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
mrq->sbc = &rq->sbc;
mrq->cap_cmd_during_tfr = true;
- test_areq.areq.mrq = mrq;
- test_areq.areq.err_check = mmc_test_check_result_async;
-
mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
512, write);
@@ -2388,11 +2382,9 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
/* Start ongoing data request */
if (use_areq) {
- mmc_start_areq(host, &test_areq.areq, &blkstat);
- if (blkstat != MMC_BLK_SUCCESS) {
- ret = RESULT_FAIL;
+ ret = mmc_test_start_areq(test, mrq, NULL);
+ if (ret)
goto out_free;
- }
} else {
mmc_wait_for_req(host, mrq);
}
@@ -2426,9 +2418,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
/* Wait for data request to complete */
if (use_areq) {
- mmc_start_areq(host, NULL, &blkstat);
- if (blkstat != MMC_BLK_SUCCESS)
- ret = RESULT_FAIL;
+ ret = mmc_test_start_areq(test, NULL, mrq);
} else {
mmc_wait_for_req_done(test->card->host, mrq);
}
@@ -3066,10 +3056,9 @@ static int mtf_test_show(struct seq_file *sf, void *data)
seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
list_for_each_entry(tr, &gr->tr_lst, link) {
- seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
+ seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n",
tr->count, tr->sectors,
- (unsigned long)tr->ts.tv_sec,
- (unsigned long)tr->ts.tv_nsec,
+ (u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec,
tr->rate, tr->iops / 100, tr->iops % 100);
}
}
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 4f33d27..421fab7 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -22,100 +22,147 @@
#include "block.h"
#include "core.h"
#include "card.h"
+#include "host.h"
-/*
- * Prepare a MMC request. This just filters out odd stuff.
- */
-static int mmc_prep_request(struct request_queue *q, struct request *req)
+static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
- struct mmc_queue *mq = q->queuedata;
+ /* Allow only 1 DCMD at a time */
+ return mq->in_flight[MMC_ISSUE_DCMD];
+}
- if (mq && mmc_card_removed(mq->card))
- return BLKPREP_KILL;
+void mmc_cqe_check_busy(struct mmc_queue *mq)
+{
+ if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
+ mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
- req->rq_flags |= RQF_DONTPREP;
+ mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
+}
- return BLKPREP_OK;
+static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
+{
+ return host->caps2 & MMC_CAP2_CQE_DCMD;
}
-static int mmc_queue_thread(void *d)
+static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
+ struct request *req)
{
- struct mmc_queue *mq = d;
- struct request_queue *q = mq->queue;
- struct mmc_context_info *cntx = &mq->card->host->context_info;
+ switch (req_op(req)) {
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
+ return MMC_ISSUE_SYNC;
+ case REQ_OP_FLUSH:
+ return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
+ default:
+ return MMC_ISSUE_ASYNC;
+ }
+}
- current->flags |= PF_MEMALLOC;
+enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_host *host = mq->card->host;
- down(&mq->thread_sem);
- do {
- struct request *req;
+ if (mq->use_cqe)
+ return mmc_cqe_issue_type(host, req);
- spin_lock_irq(q->queue_lock);
- set_current_state(TASK_INTERRUPTIBLE);
- req = blk_fetch_request(q);
- mq->asleep = false;
- cntx->is_waiting_last_req = false;
- cntx->is_new_req = false;
- if (!req) {
- /*
- * Dispatch queue is empty so set flags for
- * mmc_request_fn() to wake us up.
- */
- if (mq->qcnt)
- cntx->is_waiting_last_req = true;
- else
- mq->asleep = true;
- }
- spin_unlock_irq(q->queue_lock);
+ if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
+ return MMC_ISSUE_ASYNC;
- if (req || mq->qcnt) {
- set_current_state(TASK_RUNNING);
- mmc_blk_issue_rq(mq, req);
- cond_resched();
- } else {
- if (kthread_should_stop()) {
- set_current_state(TASK_RUNNING);
- break;
- }
- up(&mq->thread_sem);
- schedule();
- down(&mq->thread_sem);
- }
- } while (1);
- up(&mq->thread_sem);
+ return MMC_ISSUE_SYNC;
+}
- return 0;
+static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
+{
+ if (!mq->recovery_needed) {
+ mq->recovery_needed = true;
+ schedule_work(&mq->recovery_work);
+ }
}
-/*
- * Generic MMC request handler. This is called for any queue on a
- * particular host. When the host is not busy, we look for a request
- * on any queue on this host, and attempt to issue it. This may
- * not be the queue we were asked to process.
- */
-static void mmc_request_fn(struct request_queue *q)
+void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
+ struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+ brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
- struct request *req;
- struct mmc_context_info *cntx;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __mmc_cqe_recovery_notifier(mq);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
- if (!mq) {
- while ((req = blk_fetch_request(q)) != NULL) {
- req->rq_flags |= RQF_QUIET;
- __blk_end_request_all(req, BLK_STS_IOERR);
+static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_queue *mq = req->q->queuedata;
+ struct mmc_host *host = mq->card->host;
+ enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
+ bool recovery_needed = false;
+
+ switch (issue_type) {
+ case MMC_ISSUE_ASYNC:
+ case MMC_ISSUE_DCMD:
+ if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
+ if (recovery_needed)
+ __mmc_cqe_recovery_notifier(mq);
+ return BLK_EH_RESET_TIMER;
}
- return;
+ /* No timeout */
+ return BLK_EH_HANDLED;
+ default:
+ /* Timeout is handled by mmc core */
+ return BLK_EH_RESET_TIMER;
}
+}
+
+static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
+ bool reserved)
+{
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ unsigned long flags;
+ int ret;
- cntx = &mq->card->host->context_info;
+ spin_lock_irqsave(q->queue_lock, flags);
- if (cntx->is_waiting_last_req) {
- cntx->is_new_req = true;
- wake_up_interruptible(&cntx->wait);
- }
+ if (mq->recovery_needed || !mq->use_cqe)
+ ret = BLK_EH_RESET_TIMER;
+ else
+ ret = mmc_cqe_timed_out(req);
- if (mq->asleep)
- wake_up_process(mq->thread);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return ret;
+}
+
+static void mmc_mq_recovery_handler(struct work_struct *work)
+{
+ struct mmc_queue *mq = container_of(work, struct mmc_queue,
+ recovery_work);
+ struct request_queue *q = mq->queue;
+
+ mmc_get_card(mq->card, &mq->ctx);
+
+ mq->in_recovery = true;
+
+ if (mq->use_cqe)
+ mmc_blk_cqe_recovery(mq);
+ else
+ mmc_blk_mq_recovery(mq);
+
+ mq->in_recovery = false;
+
+ spin_lock_irq(q->queue_lock);
+ mq->recovery_needed = false;
+ spin_unlock_irq(q->queue_lock);
+
+ mmc_put_card(mq->card, &mq->ctx);
+
+ blk_mq_run_hw_queues(q, true);
}
static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
@@ -154,11 +201,10 @@ static void mmc_queue_setup_discard(struct request_queue *q,
* @req: the request
* @gfp: memory allocation policy
*/
-static int mmc_init_request(struct request_queue *q, struct request *req,
- gfp_t gfp)
+static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
+ gfp_t gfp)
{
struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
- struct mmc_queue *mq = q->queuedata;
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
@@ -177,6 +223,131 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
mq_rq->sg = NULL;
}
+static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx, unsigned int numa_node)
+{
+ return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
+}
+
+static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx)
+{
+ struct mmc_queue *mq = set->driver_data;
+
+ mmc_exit_request(mq->queue, req);
+}
+
+/*
+ * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests
+ * will not be dispatched in parallel.
+ */
+static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *req = bd->rq;
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ enum mmc_issue_type issue_type;
+ enum mmc_issued issued;
+ bool get_card, cqe_retune_ok;
+ int ret;
+
+ if (mmc_card_removed(mq->card)) {
+ req->rq_flags |= RQF_QUIET;
+ return BLK_STS_IOERR;
+ }
+
+ issue_type = mmc_issue_type(mq, req);
+
+ spin_lock_irq(q->queue_lock);
+
+ if (mq->recovery_needed) {
+ spin_unlock_irq(q->queue_lock);
+ return BLK_STS_RESOURCE;
+ }
+
+ switch (issue_type) {
+ case MMC_ISSUE_DCMD:
+ if (mmc_cqe_dcmd_busy(mq)) {
+ mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
+ spin_unlock_irq(q->queue_lock);
+ return BLK_STS_RESOURCE;
+ }
+ break;
+ case MMC_ISSUE_ASYNC:
+ break;
+ default:
+ /*
+ * Timeouts are handled by mmc core, and we don't have a host
+ * API to abort requests, so we can't handle the timeout anyway.
+ * However, when the timeout happens, blk_mq_complete_request()
+ * no longer works (to stop the request disappearing under us).
+ * To avoid racing with that, set a large timeout.
+ */
+ req->timeout = 600 * HZ;
+ break;
+ }
+
+ mq->in_flight[issue_type] += 1;
+ get_card = (mmc_tot_in_flight(mq) == 1);
+ cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
+
+ spin_unlock_irq(q->queue_lock);
+
+ if (!(req->rq_flags & RQF_DONTPREP)) {
+ req_to_mmc_queue_req(req)->retries = 0;
+ req->rq_flags |= RQF_DONTPREP;
+ }
+
+ if (get_card)
+ mmc_get_card(card, &mq->ctx);
+
+ if (mq->use_cqe) {
+ host->retune_now = host->need_retune && cqe_retune_ok &&
+ !host->hold_retune;
+ }
+
+ blk_mq_start_request(req);
+
+ issued = mmc_blk_mq_issue_rq(mq, req);
+
+ switch (issued) {
+ case MMC_REQ_BUSY:
+ ret = BLK_STS_RESOURCE;
+ break;
+ case MMC_REQ_FAILED_TO_START:
+ ret = BLK_STS_IOERR;
+ break;
+ default:
+ ret = BLK_STS_OK;
+ break;
+ }
+
+ if (issued != MMC_REQ_STARTED) {
+ bool put_card = false;
+
+ spin_lock_irq(q->queue_lock);
+ mq->in_flight[issue_type] -= 1;
+ if (mmc_tot_in_flight(mq) == 0)
+ put_card = true;
+ spin_unlock_irq(q->queue_lock);
+ if (put_card)
+ mmc_put_card(card, &mq->ctx);
+ }
+
+ return ret;
+}
+
+static const struct blk_mq_ops mmc_mq_ops = {
+ .queue_rq = mmc_mq_queue_rq,
+ .init_request = mmc_mq_init_request,
+ .exit_request = mmc_mq_exit_request,
+ .complete = mmc_blk_mq_complete,
+ .timeout = mmc_mq_timed_out,
+};
+
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
struct mmc_host *host = card->host;
@@ -196,124 +367,139 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
- /* Initialize thread_sem even if it is not used */
- sema_init(&mq->thread_sem, 1);
+ INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
+ INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
+
+ mutex_init(&mq->complete_lock);
+
+ init_waitqueue_head(&mq->wait);
}
-/**
- * mmc_init_queue - initialise a queue structure.
- * @mq: mmc queue
- * @card: mmc card to attach this queue
- * @lock: queue lock
- * @subname: partition subname
- *
- * Initialise a MMC card request queue.
- */
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
- spinlock_t *lock, const char *subname)
+static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth,
+ const struct blk_mq_ops *mq_ops, spinlock_t *lock)
{
- struct mmc_host *host = card->host;
- int ret = -ENOMEM;
-
- mq->card = card;
- mq->queue = blk_alloc_queue(GFP_KERNEL);
- if (!mq->queue)
- return -ENOMEM;
- mq->queue->queue_lock = lock;
- mq->queue->request_fn = mmc_request_fn;
- mq->queue->init_rq_fn = mmc_init_request;
- mq->queue->exit_rq_fn = mmc_exit_request;
- mq->queue->cmd_size = sizeof(struct mmc_queue_req);
- mq->queue->queuedata = mq;
- mq->qcnt = 0;
- ret = blk_init_allocated_queue(mq->queue);
- if (ret) {
- blk_cleanup_queue(mq->queue);
+ int ret;
+
+ memset(&mq->tag_set, 0, sizeof(mq->tag_set));
+ mq->tag_set.ops = mq_ops;
+ mq->tag_set.queue_depth = q_depth;
+ mq->tag_set.numa_node = NUMA_NO_NODE;
+ mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE |
+ BLK_MQ_F_BLOCKING;
+ mq->tag_set.nr_hw_queues = 1;
+ mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
+ mq->tag_set.driver_data = mq;
+
+ ret = blk_mq_alloc_tag_set(&mq->tag_set);
+ if (ret)
return ret;
- }
-
- blk_queue_prep_rq(mq->queue, mmc_prep_request);
-
- mmc_setup_queue(mq, card);
- mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
- host->index, subname ? subname : "");
-
- if (IS_ERR(mq->thread)) {
- ret = PTR_ERR(mq->thread);
- goto cleanup_queue;
+ mq->queue = blk_mq_init_queue(&mq->tag_set);
+ if (IS_ERR(mq->queue)) {
+ ret = PTR_ERR(mq->queue);
+ goto free_tag_set;
}
+ mq->queue->queue_lock = lock;
+ mq->queue->queuedata = mq;
+
return 0;
-cleanup_queue:
- blk_cleanup_queue(mq->queue);
+free_tag_set:
+ blk_mq_free_tag_set(&mq->tag_set);
+
return ret;
}
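
Note the cmd_size field above: blk-mq co-allocates that many bytes of driver "PDU" with every request, which is how struct mmc_queue_req now lives alongside each request with no separate allocation and no qcnt bookkeeping. The accessor is just pointer arithmetic, mirroring req_to_mmc_queue_req() in queue.h:

	static inline struct mmc_queue_req *demo_rq_pdu(struct request *rq)
	{
		/* points at the cmd_size area co-allocated with rq */
		return blk_mq_rq_to_pdu(rq);
	}
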
-void mmc_cleanup_queue(struct mmc_queue *mq)
-{
- struct request_queue *q = mq->queue;
- unsigned long flags;
+/* Set queue depth to get a reasonable value for q->nr_requests */
+#define MMC_QUEUE_DEPTH 64
- /* Make sure the queue isn't suspended, as that will deadlock */
- mmc_queue_resume(mq);
+static int mmc_mq_init(struct mmc_queue *mq, struct mmc_card *card,
+ spinlock_t *lock)
+{
+ struct mmc_host *host = card->host;
+ int q_depth;
+ int ret;
+
+ /*
+ * The queue depth for CQE must match the hardware because the request
+ * tag is used to index the hardware queue.
+ */
+ if (mq->use_cqe)
+ q_depth = min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
+ else
+ q_depth = MMC_QUEUE_DEPTH;
+
+ ret = mmc_mq_init_queue(mq, q_depth, &mmc_mq_ops, lock);
+ if (ret)
+ return ret;
- /* Then terminate our worker thread */
- kthread_stop(mq->thread);
+ blk_queue_rq_timeout(mq->queue, 60 * HZ);
- /* Empty the queue */
- spin_lock_irqsave(q->queue_lock, flags);
- q->queuedata = NULL;
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ mmc_setup_queue(mq, card);
- mq->card = NULL;
+ return 0;
}
-EXPORT_SYMBOL(mmc_cleanup_queue);
/**
- * mmc_queue_suspend - suspend a MMC request queue
- * @mq: MMC queue to suspend
+ * mmc_init_queue - initialise a queue structure.
+ * @mq: mmc queue
+ * @card: mmc card to attach this queue
+ * @lock: queue lock
+ * @subname: partition subname
*
- * Stop the block request queue, and wait for our thread to
- * complete any outstanding requests. This ensures that we
- * won't suspend while a request is being processed.
+ * Initialise a MMC card request queue.
*/
-void mmc_queue_suspend(struct mmc_queue *mq)
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+ spinlock_t *lock, const char *subname)
{
- struct request_queue *q = mq->queue;
- unsigned long flags;
+ struct mmc_host *host = card->host;
- if (!mq->suspended) {
- mq->suspended |= true;
+ mq->card = card;
- spin_lock_irqsave(q->queue_lock, flags);
- blk_stop_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ mq->use_cqe = host->cqe_enabled;
- down(&mq->thread_sem);
- }
+ return mmc_mq_init(mq, card, lock);
+}
+
+void mmc_queue_suspend(struct mmc_queue *mq)
+{
+ blk_mq_quiesce_queue(mq->queue);
+
+ /*
+ * The host remains claimed while there are outstanding requests, so
+ * simply claiming and releasing here ensures there are none.
+ */
+ mmc_claim_host(mq->card->host);
+ mmc_release_host(mq->card->host);
}
-/**
- * mmc_queue_resume - resume a previously suspended MMC request queue
- * @mq: MMC queue to resume
- */
void mmc_queue_resume(struct mmc_queue *mq)
{
+ blk_mq_unquiesce_queue(mq->queue);
+}
+
+void mmc_cleanup_queue(struct mmc_queue *mq)
+{
struct request_queue *q = mq->queue;
- unsigned long flags;
- if (mq->suspended) {
- mq->suspended = false;
+ /*
+ * The legacy code handled the possibility of being suspended,
+ * so do that here too.
+ */
+ if (blk_queue_quiesced(q))
+ blk_mq_unquiesce_queue(q);
- up(&mq->thread_sem);
+ blk_cleanup_queue(q);
- spin_lock_irqsave(q->queue_lock, flags);
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
+ /*
+ * A request can be completed before the next request, potentially
+ * leaving a complete_work with nothing to do. Such a work item might
+ * still be queued at this point. Flush it.
+ */
+ flush_work(&mq->complete_work);
+
+ mq->card = NULL;
}
/*
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 547b457..17e59d5 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -8,6 +8,20 @@
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
+enum mmc_issued {
+ MMC_REQ_STARTED,
+ MMC_REQ_BUSY,
+ MMC_REQ_FAILED_TO_START,
+ MMC_REQ_FINISHED,
+};
+
+enum mmc_issue_type {
+ MMC_ISSUE_SYNC,
+ MMC_ISSUE_DCMD,
+ MMC_ISSUE_ASYNC,
+ MMC_ISSUE_MAX,
+};
+
static inline struct mmc_queue_req *req_to_mmc_queue_req(struct request *rq)
{
return blk_mq_rq_to_pdu(rq);
@@ -20,7 +34,6 @@ static inline struct request *mmc_queue_req_to_req(struct mmc_queue_req *mqr)
return blk_mq_rq_from_pdu(mqr);
}
-struct task_struct;
struct mmc_blk_data;
struct mmc_blk_ioc_data;
@@ -30,7 +43,6 @@ struct mmc_blk_request {
struct mmc_command cmd;
struct mmc_command stop;
struct mmc_data data;
- int retune_retry_done;
};
/**
@@ -52,28 +64,34 @@ enum mmc_drv_op {
struct mmc_queue_req {
struct mmc_blk_request brq;
struct scatterlist *sg;
- struct mmc_async_req areq;
enum mmc_drv_op drv_op;
int drv_op_result;
void *drv_op_data;
unsigned int ioc_count;
+ int retries;
};
struct mmc_queue {
struct mmc_card *card;
- struct task_struct *thread;
- struct semaphore thread_sem;
- bool suspended;
- bool asleep;
+ struct mmc_ctx ctx;
+ struct blk_mq_tag_set tag_set;
struct mmc_blk_data *blkdata;
struct request_queue *queue;
- /*
- * FIXME: this counter is not a very reliable way of keeping
- * track of how many requests that are ongoing. Switch to just
- * letting the block core keep track of requests and per-request
- * associated mmc_queue_req data.
- */
- int qcnt;
+ int in_flight[MMC_ISSUE_MAX];
+ unsigned int cqe_busy;
+#define MMC_CQE_DCMD_BUSY BIT(0)
+#define MMC_CQE_QUEUE_FULL BIT(1)
+ bool use_cqe;
+ bool recovery_needed;
+ bool in_recovery;
+ bool rw_wait;
+ bool waiting;
+ struct work_struct recovery_work;
+ wait_queue_head_t wait;
+ struct request *recovery_req;
+ struct request *complete_req;
+ struct mutex complete_lock;
+ struct work_struct complete_work;
};
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -84,4 +102,22 @@ extern void mmc_queue_resume(struct mmc_queue *);
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
struct mmc_queue_req *);
+void mmc_cqe_check_busy(struct mmc_queue *mq);
+void mmc_cqe_recovery_notifier(struct mmc_request *mrq);
+
+enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req);
+
+static inline int mmc_tot_in_flight(struct mmc_queue *mq)
+{
+ return mq->in_flight[MMC_ISSUE_SYNC] +
+ mq->in_flight[MMC_ISSUE_DCMD] +
+ mq->in_flight[MMC_ISSUE_ASYNC];
+}
+
+static inline int mmc_cqe_qcnt(struct mmc_queue *mq)
+{
+ return mq->in_flight[MMC_ISSUE_DCMD] +
+ mq->in_flight[MMC_ISSUE_ASYNC];
+}
+
#endif
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index f664e9c..75d3176 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -53,6 +53,14 @@ static const struct mmc_fixup mmc_blk_fixups[] = {
MMC_QUIRK_BLK_NO_CMD23),
/*
+ * Some SD cards lock up while using CMD23 multiblock transfers.
+ */
+ MMC_FIXUP("AF SD", CID_MANFID_ATP, CID_OEMID_ANY, add_quirk_sd,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("APUSD", CID_MANFID_APACER, 0x5048, add_quirk_sd,
+ MMC_QUIRK_BLK_NO_CMD23),
+
+ /*
* Some MMC cards need longer data read timeout than indicated in CSD.
*/
MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 863f1db..3698b05 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -121,20 +121,18 @@ EXPORT_SYMBOL(mmc_gpio_request_ro);
void mmc_gpiod_request_cd_irq(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
- int ret, irq;
+ int irq = -EINVAL;
+ int ret;
if (host->slot.cd_irq >= 0 || !ctx || !ctx->cd_gpio)
return;
- irq = gpiod_to_irq(ctx->cd_gpio);
-
/*
- * Even if gpiod_to_irq() returns a valid IRQ number, the platform might
- * still prefer to poll, e.g., because that IRQ number is already used
- * by another unit and cannot be shared.
+ * Do not use IRQ if the platform prefers to poll, e.g., because that
+ * IRQ number is already used by another unit and cannot be shared.
*/
- if (irq >= 0 && host->caps & MMC_CAP_NEEDS_POLL)
- irq = -EINVAL;
+ if (!(host->caps & MMC_CAP_NEEDS_POLL))
+ irq = gpiod_to_irq(ctx->cd_gpio);
if (irq >= 0) {
if (!ctx->cd_gpio_isr)
@@ -307,3 +305,11 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
return 0;
}
EXPORT_SYMBOL(mmc_gpiod_request_ro);
+
+bool mmc_can_gpio_ro(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ return ctx->ro_gpio ? true : false;
+}
+EXPORT_SYMBOL(mmc_can_gpio_ro);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 567028c..67bd334 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -81,6 +81,7 @@ config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
config MMC_SDHCI_PCI
tristate "SDHCI support on PCI bus"
depends on MMC_SDHCI && PCI
+ select MMC_CQHCI
help
This selects the PCI Secure Digital Host Controller Interface.
Most controllers found today are PCI devices.
@@ -132,6 +133,7 @@ config MMC_SDHCI_OF_ARASAN
depends on MMC_SDHCI_PLTFM
depends on OF
depends on COMMON_CLK
+ select MMC_CQHCI
help
This selects the Arasan Secure Digital Host Controller Interface
(SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC.
@@ -320,7 +322,7 @@ config MMC_SDHCI_BCM_KONA
config MMC_SDHCI_F_SDH30
tristate "SDHCI support for Fujitsu Semiconductor F_SDH30"
depends on MMC_SDHCI_PLTFM
- depends on OF
+ depends on OF || ACPI
help
This selects the Secure Digital Host Controller Interface (SDHCI)
Needed by some Fujitsu SoC for MMC / SD / SDIO support.
@@ -595,11 +597,8 @@ config MMC_TMIO
config MMC_SDHI
tristate "Renesas SDHI SD/SDIO controller support"
- depends on SUPERH || ARM || ARM64
depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
select MMC_TMIO_CORE
- select MMC_SDHI_SYS_DMAC if (SUPERH || ARM)
- select MMC_SDHI_INTERNAL_DMAC if ARM64
help
This provides support for the SDHI SD/SDIO controller found in
Renesas SuperH, ARM and ARM64 based SoCs
@@ -607,6 +606,7 @@ config MMC_SDHI
config MMC_SDHI_SYS_DMAC
tristate "DMA for SDHI SD/SDIO controllers using SYS-DMAC"
depends on MMC_SDHI
+ default MMC_SDHI if (SUPERH || ARM)
help
This provides DMA support for SDHI SD/SDIO controllers
using SYS-DMAC via DMA Engine. This supports the controllers
@@ -616,6 +616,7 @@ config MMC_SDHI_INTERNAL_DMAC
tristate "DMA for SDHI SD/SDIO controllers using on-chip bus mastering"
depends on ARM64 || COMPILE_TEST
depends on MMC_SDHI
+ default MMC_SDHI if ARM64
help
This provides DMA support for SDHI SD/SDIO controllers
using on-chip bus mastering. This supports the controllers
@@ -838,14 +839,14 @@ config MMC_USDHI6ROL0
config MMC_REALTEK_PCI
tristate "Realtek PCI-E SD/MMC Card Interface Driver"
- depends on MFD_RTSX_PCI
+ depends on MISC_RTSX_PCI
help
Say Y here to include driver code to support SD/MMC card interface
of Realtek PCI-E card reader
config MMC_REALTEK_USB
tristate "Realtek USB SD/MMC Card Interface Driver"
- depends on MFD_RTSX_USB
+ depends on MISC_RTSX_USB
help
Say Y here to include driver code to support SD/MMC card interface
of Realtek RTS5129/39 series card reader
@@ -857,6 +858,19 @@ config MMC_SUNXI
This selects support for the SD/MMC Host Controller on
Allwinner sunxi SoCs.
+config MMC_CQHCI
+ tristate "Command Queue Host Controller Interface support"
+ depends on HAS_DMA
+ help
+ This selects the Command Queue Host Controller Interface (CQHCI)
+	  support present in host controllers of Qualcomm Technologies, Inc.,
+	  amongst others.
+	  It enables the use of eMMC devices that support command queueing.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_TOSHIBA_PCI
tristate "Toshiba Type A SD/MMC Card Interface Driver"
depends on PCI
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index a43cf0d..84cd138 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
-sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o
+sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o sdhci-pci-arasan.o
obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
@@ -39,12 +39,8 @@ obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o
obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_core.o
-ifeq ($(subst m,y,$(CONFIG_MMC_SDHI_SYS_DMAC)),y)
-obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_sys_dmac.o
-endif
-ifeq ($(subst m,y,$(CONFIG_MMC_SDHI_INTERNAL_DMAC)),y)
-obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_internal_dmac.o
-endif
+obj-$(CONFIG_MMC_SDHI_SYS_DMAC) += renesas_sdhi_sys_dmac.o
+obj-$(CONFIG_MMC_SDHI_INTERNAL_DMAC) += renesas_sdhi_internal_dmac.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
@@ -92,6 +88,7 @@ obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o
obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o
+obj-$(CONFIG_MMC_CQHCI) += cqhci.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
index 63fe509..63d2758 100644
--- a/drivers/mmc/host/android-goldfish.c
+++ b/drivers/mmc/host/android-goldfish.c
@@ -42,13 +42,11 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/clk.h>
-#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/types.h>
-#include <asm/io.h>
#include <linux/uaccess.h>
#define DRIVER_NAME "goldfish_mmc"
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
new file mode 100644
index 0000000..159270e
--- /dev/null
+++ b/drivers/mmc/host/cqhci.c
@@ -0,0 +1,1150 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/platform_device.h>
+#include <linux/ktime.h>
+
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+
+#include "cqhci.h"
+
+#define DCMD_SLOT 31
+#define NUM_SLOTS 32
+
+struct cqhci_slot {
+ struct mmc_request *mrq;
+ unsigned int flags;
+#define CQHCI_EXTERNAL_TIMEOUT BIT(0)
+#define CQHCI_COMPLETED BIT(1)
+#define CQHCI_HOST_CRC BIT(2)
+#define CQHCI_HOST_TIMEOUT BIT(3)
+#define CQHCI_HOST_OTHER BIT(4)
+};
+
+static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ return cq_host->desc_base + (tag * cq_host->slot_sz);
+}
+
+static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ u8 *desc = get_desc(cq_host, tag);
+
+ return desc + cq_host->task_desc_len;
+}
+
+static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
+{
+ return cq_host->trans_desc_dma_base +
+ (cq_host->mmc->max_segs * tag *
+ cq_host->trans_desc_len);
+}
+
+static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ return cq_host->trans_desc_base +
+ (cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
+}
+
+static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ u8 *link_temp;
+ dma_addr_t trans_temp;
+
+ link_temp = get_link_desc(cq_host, tag);
+ trans_temp = get_trans_desc_dma(cq_host, tag);
+
+ memset(link_temp, 0, cq_host->link_desc_len);
+ if (cq_host->link_desc_len > 8)
+ *(link_temp + 8) = 0;
+
+ if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
+ *link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
+ return;
+ }
+
+ *link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);
+
+ if (cq_host->dma64) {
+ __le64 *data_addr = (__le64 __force *)(link_temp + 4);
+
+ data_addr[0] = cpu_to_le64(trans_temp);
+ } else {
+ __le32 *data_addr = (__le32 __force *)(link_temp + 4);
+
+ data_addr[0] = cpu_to_le32(trans_temp);
+ }
+}
+
+static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
+{
+ cqhci_writel(cq_host, set, CQHCI_ISTE);
+ cqhci_writel(cq_host, set, CQHCI_ISGE);
+}
+
+#define DRV_NAME "cqhci"
+
+#define CQHCI_DUMP(f, x...) \
+ pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)
+
+static void cqhci_dumpregs(struct cqhci_host *cq_host)
+{
+ struct mmc_host *mmc = cq_host->mmc;
+
+ CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");
+
+ CQHCI_DUMP("Caps: 0x%08x | Version: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_CAP),
+ cqhci_readl(cq_host, CQHCI_VER));
+ CQHCI_DUMP("Config: 0x%08x | Control: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_CFG),
+ cqhci_readl(cq_host, CQHCI_CTL));
+ CQHCI_DUMP("Int stat: 0x%08x | Int enab: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_IS),
+ cqhci_readl(cq_host, CQHCI_ISTE));
+ CQHCI_DUMP("Int sig: 0x%08x | Int Coal: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_ISGE),
+ cqhci_readl(cq_host, CQHCI_IC));
+ CQHCI_DUMP("TDL base: 0x%08x | TDL up32: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_TDLBA),
+ cqhci_readl(cq_host, CQHCI_TDLBAU));
+ CQHCI_DUMP("Doorbell: 0x%08x | TCN: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_TDBR),
+ cqhci_readl(cq_host, CQHCI_TCN));
+ CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_DQS),
+ cqhci_readl(cq_host, CQHCI_DPT));
+ CQHCI_DUMP("Task clr: 0x%08x | SSC1: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_TCLR),
+ cqhci_readl(cq_host, CQHCI_SSC1));
+ CQHCI_DUMP("SSC2: 0x%08x | DCMD rsp: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_SSC2),
+ cqhci_readl(cq_host, CQHCI_CRDCT));
+ CQHCI_DUMP("RED mask: 0x%08x | TERRI: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_RMEM),
+ cqhci_readl(cq_host, CQHCI_TERRI));
+ CQHCI_DUMP("Resp idx: 0x%08x | Resp arg: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_CRI),
+ cqhci_readl(cq_host, CQHCI_CRA));
+
+ if (cq_host->ops->dumpregs)
+ cq_host->ops->dumpregs(mmc);
+ else
+ CQHCI_DUMP(": ===========================================\n");
+}
+
+/**
+ * The allocated descriptor table for task, link & transfer descriptors
+ * looks like:
+ * |----------|
+ * |task desc | |->|----------|
+ * |----------| | |trans desc|
+ * |link desc-|->| |----------|
+ * |----------| .
+ * . .
+ * no. of slots max-segs
+ * . |----------|
+ * |----------|
+ * The idea here is to create the [task+trans] table and mark & point the
+ * link desc to the transfer desc table on a per slot basis.
+ */
+static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
+{
+	int i;
+
+ /* task descriptor can be 64/128 bit irrespective of arch */
+ if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
+ cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
+ CQHCI_TASK_DESC_SZ, CQHCI_CFG);
+ cq_host->task_desc_len = 16;
+ } else {
+ cq_host->task_desc_len = 8;
+ }
+
+ /*
+	 * A transfer descriptor may be 96 bits long instead of 128 bits,
+	 * which means ADMA expects the next valid descriptor at the 96th
+	 * bit or the 128th bit respectively.
+ */
+ if (cq_host->dma64) {
+ if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
+ cq_host->trans_desc_len = 12;
+ else
+ cq_host->trans_desc_len = 16;
+ cq_host->link_desc_len = 16;
+ } else {
+ cq_host->trans_desc_len = 8;
+ cq_host->link_desc_len = 8;
+ }
+
+ /* total size of a slot: 1 task & 1 transfer (link) */
+ cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;
+
+ cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
+
+ cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
+ (cq_host->num_slots - 1);
+
+ pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
+ mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
+ cq_host->slot_sz);
+
+ /*
+	 * Allocate one DMA-mapped chunk of memory for the task and link
+	 * descriptors, and another for the transfer descriptors, then set
+	 * up each slot's link descriptor to point into the transfer
+	 * descriptor table.
+ */
+ cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
+ cq_host->desc_size,
+ &cq_host->desc_dma_base,
+ GFP_KERNEL);
+ cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
+ cq_host->data_size,
+ &cq_host->trans_desc_dma_base,
+ GFP_KERNEL);
+ if (!cq_host->desc_base || !cq_host->trans_desc_base)
+ return -ENOMEM;
+
+ pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
+ mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
+ (unsigned long long)cq_host->desc_dma_base,
+ (unsigned long long)cq_host->trans_desc_dma_base);
+
+	for (i = 0; i < cq_host->num_slots; i++)
+ setup_trans_desc(cq_host, i);
+
+ return 0;
+}
+
+static void __cqhci_enable(struct cqhci_host *cq_host)
+{
+ struct mmc_host *mmc = cq_host->mmc;
+ u32 cqcfg;
+
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+
+ /* Configuration must not be changed while enabled */
+ if (cqcfg & CQHCI_ENABLE) {
+ cqcfg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ }
+
+ cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);
+
+ if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
+ cqcfg |= CQHCI_DCMD;
+
+ if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
+ cqcfg |= CQHCI_TASK_DESC_SZ;
+
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
+ CQHCI_TDLBA);
+ cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
+ CQHCI_TDLBAU);
+
+ cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);
+
+ cqhci_set_irqs(cq_host, 0);
+
+ cqcfg |= CQHCI_ENABLE;
+
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ mmc->cqe_on = true;
+
+ if (cq_host->ops->enable)
+ cq_host->ops->enable(mmc);
+
+ /* Ensure all writes are done before interrupts are enabled */
+ wmb();
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
+
+ cq_host->activated = true;
+}
+
+static void __cqhci_disable(struct cqhci_host *cq_host)
+{
+ u32 cqcfg;
+
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ cqcfg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ cq_host->mmc->cqe_on = false;
+
+ cq_host->activated = false;
+}
+
+int cqhci_suspend(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ if (cq_host->enabled)
+ __cqhci_disable(cq_host);
+
+ return 0;
+}
+EXPORT_SYMBOL(cqhci_suspend);
+
+int cqhci_resume(struct mmc_host *mmc)
+{
+ /* Re-enable is done upon first request */
+ return 0;
+}
+EXPORT_SYMBOL(cqhci_resume);
+
+static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ int err;
+
+ if (cq_host->enabled)
+ return 0;
+
+ cq_host->rca = card->rca;
+
+ err = cqhci_host_alloc_tdl(cq_host);
+ if (err)
+ return err;
+
+ __cqhci_enable(cq_host);
+
+ cq_host->enabled = true;
+
+#ifdef DEBUG
+ cqhci_dumpregs(cq_host);
+#endif
+ return 0;
+}
+
+/* CQHCI is idle and should halt immediately, so set a small timeout */
+#define CQHCI_OFF_TIMEOUT 100
+
+static void cqhci_off(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ ktime_t timeout;
+ bool timed_out;
+ u32 reg;
+
+ if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
+ return;
+
+ if (cq_host->ops->disable)
+ cq_host->ops->disable(mmc, false);
+
+ cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
+
+ timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
+ while (1) {
+ timed_out = ktime_compare(ktime_get(), timeout) > 0;
+ reg = cqhci_readl(cq_host, CQHCI_CTL);
+ if ((reg & CQHCI_HALT) || timed_out)
+ break;
+ }
+
+ if (timed_out)
+ pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
+ else
+ pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
+
+ mmc->cqe_on = false;
+}
+
+static void cqhci_disable(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ if (!cq_host->enabled)
+ return;
+
+ cqhci_off(mmc);
+
+ __cqhci_disable(cq_host);
+
+ dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
+ cq_host->trans_desc_base,
+ cq_host->trans_desc_dma_base);
+
+ dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
+ cq_host->desc_base,
+ cq_host->desc_dma_base);
+
+ cq_host->trans_desc_base = NULL;
+ cq_host->desc_base = NULL;
+
+ cq_host->enabled = false;
+}
+
+static void cqhci_prep_task_desc(struct mmc_request *mrq,
+ u64 *data, bool intr)
+{
+ u32 req_flags = mrq->data->flags;
+
+ *data = CQHCI_VALID(1) |
+ CQHCI_END(1) |
+ CQHCI_INT(intr) |
+ CQHCI_ACT(0x5) |
+ CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
+ CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
+ CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
+ CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
+ CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
+ CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
+ CQHCI_BLK_COUNT(mrq->data->blocks) |
+ CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);
+
+ pr_debug("%s: cqhci: tag %d task descriptor 0x016%llx\n",
+ mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
+}
+
+static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int sg_count;
+ struct mmc_data *data = mrq->data;
+
+ if (!data)
+ return -EINVAL;
+
+ sg_count = dma_map_sg(mmc_dev(host), data->sg,
+ data->sg_len,
+ (data->flags & MMC_DATA_WRITE) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (!sg_count) {
+ pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
+ return -ENOMEM;
+ }
+
+ return sg_count;
+}
+
+static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
+ bool dma64)
+{
+ __le32 *attr = (__le32 __force *)desc;
+
+ *attr = (CQHCI_VALID(1) |
+ CQHCI_END(end ? 1 : 0) |
+ CQHCI_INT(0) |
+ CQHCI_ACT(0x4) |
+ CQHCI_DAT_LENGTH(len));
+
+ if (dma64) {
+ __le64 *dataddr = (__le64 __force *)(desc + 4);
+
+ dataddr[0] = cpu_to_le64(addr);
+ } else {
+ __le32 *dataddr = (__le32 __force *)(desc + 4);
+
+ dataddr[0] = cpu_to_le32(addr);
+ }
+}
+
+static int cqhci_prep_tran_desc(struct mmc_request *mrq,
+ struct cqhci_host *cq_host, int tag)
+{
+ struct mmc_data *data = mrq->data;
+ int i, sg_count, len;
+ bool end = false;
+ bool dma64 = cq_host->dma64;
+ dma_addr_t addr;
+ u8 *desc;
+ struct scatterlist *sg;
+
+ sg_count = cqhci_dma_map(mrq->host, mrq);
+ if (sg_count < 0) {
+ pr_err("%s: %s: unable to map sg lists, %d\n",
+ mmc_hostname(mrq->host), __func__, sg_count);
+ return sg_count;
+ }
+
+ desc = get_trans_desc(cq_host, tag);
+
+ for_each_sg(data->sg, sg, sg_count, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ if ((i+1) == sg_count)
+ end = true;
+ cqhci_set_tran_desc(desc, addr, len, end, dma64);
+ desc += cq_host->trans_desc_len;
+ }
+
+ return 0;
+}
+
+static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
+ struct mmc_request *mrq)
+{
+ u64 *task_desc = NULL;
+ u64 data = 0;
+ u8 resp_type;
+ u8 *desc;
+ __le64 *dataddr;
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ u8 timing;
+
+ if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
+ resp_type = 0x0;
+ timing = 0x1;
+ } else {
+ if (mrq->cmd->flags & MMC_RSP_R1B) {
+ resp_type = 0x3;
+ timing = 0x0;
+ } else {
+ resp_type = 0x2;
+ timing = 0x1;
+ }
+ }
+
+ task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
+ memset(task_desc, 0, cq_host->task_desc_len);
+ data |= (CQHCI_VALID(1) |
+ CQHCI_END(1) |
+ CQHCI_INT(1) |
+ CQHCI_QBAR(1) |
+ CQHCI_ACT(0x5) |
+ CQHCI_CMD_INDEX(mrq->cmd->opcode) |
+ CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
+ *task_desc |= data;
+ desc = (u8 *)task_desc;
+ pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
+ mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
+ dataddr = (__le64 __force *)(desc + 4);
+ dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
+}
+
+static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+
+ if (data) {
+ dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
+ (data->flags & MMC_DATA_READ) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+}
+
+static inline int cqhci_tag(struct mmc_request *mrq)
+{
+ return mrq->cmd ? DCMD_SLOT : mrq->tag;
+}
+
+static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ int err = 0;
+ u64 data = 0;
+ u64 *task_desc = NULL;
+ int tag = cqhci_tag(mrq);
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ unsigned long flags;
+
+ if (!cq_host->enabled) {
+ pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
+ return -EINVAL;
+ }
+
+ /* First request after resume has to re-enable */
+ if (!cq_host->activated)
+ __cqhci_enable(cq_host);
+
+ if (!mmc->cqe_on) {
+ cqhci_writel(cq_host, 0, CQHCI_CTL);
+ mmc->cqe_on = true;
+ pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
+		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
+ pr_err("%s: cqhci: CQE failed to exit halt state\n",
+ mmc_hostname(mmc));
+ }
+ if (cq_host->ops->enable)
+ cq_host->ops->enable(mmc);
+ }
+
+ if (mrq->data) {
+ task_desc = (__le64 __force *)get_desc(cq_host, tag);
+ cqhci_prep_task_desc(mrq, &data, 1);
+ *task_desc = cpu_to_le64(data);
+ err = cqhci_prep_tran_desc(mrq, cq_host, tag);
+ if (err) {
+ pr_err("%s: cqhci: failed to setup tx desc: %d\n",
+ mmc_hostname(mmc), err);
+ return err;
+ }
+ } else {
+ cqhci_prep_dcmd_desc(mmc, mrq);
+ }
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+
+ if (cq_host->recovery_halt) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
+ cq_host->slot[tag].mrq = mrq;
+ cq_host->slot[tag].flags = 0;
+
+ cq_host->qcnt += 1;
+
+ cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
+ if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
+ pr_debug("%s: cqhci: doorbell not set for tag %d\n",
+ mmc_hostname(mmc), tag);
+out_unlock:
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ if (err)
+ cqhci_post_req(mmc, mrq);
+
+ return err;
+}
+
+static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool notify)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ if (!cq_host->recovery_halt) {
+ cq_host->recovery_halt = true;
+ pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
+ wake_up(&cq_host->wait_queue);
+ if (notify && mrq->recovery_notifier)
+ mrq->recovery_notifier(mrq);
+ }
+}
+
+static unsigned int cqhci_error_flags(int error1, int error2)
+{
+ int error = error1 ? error1 : error2;
+
+ switch (error) {
+ case -EILSEQ:
+ return CQHCI_HOST_CRC;
+ case -ETIMEDOUT:
+ return CQHCI_HOST_TIMEOUT;
+ default:
+ return CQHCI_HOST_OTHER;
+ }
+}
+
+static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
+ int data_error)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ struct cqhci_slot *slot;
+ u32 terri;
+ int tag;
+
+ spin_lock(&cq_host->lock);
+
+ terri = cqhci_readl(cq_host, CQHCI_TERRI);
+
+ pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
+ mmc_hostname(mmc), status, cmd_error, data_error, terri);
+
+ /* Forget about errors when recovery has already been triggered */
+ if (cq_host->recovery_halt)
+ goto out_unlock;
+
+ if (!cq_host->qcnt) {
+ WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
+ mmc_hostname(mmc), status, cmd_error, data_error,
+ terri);
+ goto out_unlock;
+ }
+
+ if (CQHCI_TERRI_C_VALID(terri)) {
+ tag = CQHCI_TERRI_C_TASK(terri);
+ slot = &cq_host->slot[tag];
+ if (slot->mrq) {
+ slot->flags = cqhci_error_flags(cmd_error, data_error);
+ cqhci_recovery_needed(mmc, slot->mrq, true);
+ }
+ }
+
+ if (CQHCI_TERRI_D_VALID(terri)) {
+ tag = CQHCI_TERRI_D_TASK(terri);
+ slot = &cq_host->slot[tag];
+ if (slot->mrq) {
+ slot->flags = cqhci_error_flags(data_error, cmd_error);
+ cqhci_recovery_needed(mmc, slot->mrq, true);
+ }
+ }
+
+ if (!cq_host->recovery_halt) {
+ /*
+ * The only way to guarantee forward progress is to mark at
+ * least one task in error, so if none is indicated, pick one.
+ */
+ for (tag = 0; tag < NUM_SLOTS; tag++) {
+ slot = &cq_host->slot[tag];
+ if (!slot->mrq)
+ continue;
+ slot->flags = cqhci_error_flags(data_error, cmd_error);
+ cqhci_recovery_needed(mmc, slot->mrq, true);
+ break;
+ }
+ }
+
+out_unlock:
+ spin_unlock(&cq_host->lock);
+}
+
+static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ struct cqhci_slot *slot = &cq_host->slot[tag];
+ struct mmc_request *mrq = slot->mrq;
+ struct mmc_data *data;
+
+ if (!mrq) {
+ WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
+ mmc_hostname(mmc), tag);
+ return;
+ }
+
+ /* No completions allowed during recovery */
+ if (cq_host->recovery_halt) {
+ slot->flags |= CQHCI_COMPLETED;
+ return;
+ }
+
+ slot->mrq = NULL;
+
+ cq_host->qcnt -= 1;
+
+ data = mrq->data;
+ if (data) {
+ if (data->error)
+ data->bytes_xfered = 0;
+ else
+ data->bytes_xfered = data->blksz * data->blocks;
+ }
+
+ mmc_cqe_request_done(mmc, mrq);
+}
+
+irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
+ int data_error)
+{
+ u32 status;
+ unsigned long tag = 0, comp_status;
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ status = cqhci_readl(cq_host, CQHCI_IS);
+ cqhci_writel(cq_host, status, CQHCI_IS);
+
+ pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);
+
+ if ((status & CQHCI_IS_RED) || cmd_error || data_error)
+ cqhci_error_irq(mmc, status, cmd_error, data_error);
+
+ if (status & CQHCI_IS_TCC) {
+ /* read TCN and complete the request */
+ comp_status = cqhci_readl(cq_host, CQHCI_TCN);
+ cqhci_writel(cq_host, comp_status, CQHCI_TCN);
+ pr_debug("%s: cqhci: TCN: 0x%08lx\n",
+ mmc_hostname(mmc), comp_status);
+
+ spin_lock(&cq_host->lock);
+
+ for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
+ /* complete the corresponding mrq */
+ pr_debug("%s: cqhci: completing tag %lu\n",
+ mmc_hostname(mmc), tag);
+ cqhci_finish_mrq(mmc, tag);
+ }
+
+ if (cq_host->waiting_for_idle && !cq_host->qcnt) {
+ cq_host->waiting_for_idle = false;
+ wake_up(&cq_host->wait_queue);
+ }
+
+ spin_unlock(&cq_host->lock);
+ }
+
+ if (status & CQHCI_IS_TCL)
+ wake_up(&cq_host->wait_queue);
+
+ if (status & CQHCI_IS_HAC)
+ wake_up(&cq_host->wait_queue);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(cqhci_irq);
+
+static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
+{
+ unsigned long flags;
+ bool is_idle;
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+ is_idle = !cq_host->qcnt || cq_host->recovery_halt;
+ *ret = cq_host->recovery_halt ? -EBUSY : 0;
+ cq_host->waiting_for_idle = !is_idle;
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ return is_idle;
+}
+
+static int cqhci_wait_for_idle(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ int ret;
+
+ wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));
+
+ return ret;
+}
+
+static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool *recovery_needed)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ int tag = cqhci_tag(mrq);
+ struct cqhci_slot *slot = &cq_host->slot[tag];
+ unsigned long flags;
+ bool timed_out;
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+ timed_out = slot->mrq == mrq;
+ if (timed_out) {
+ slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
+ cqhci_recovery_needed(mmc, mrq, false);
+ *recovery_needed = cq_host->recovery_halt;
+ }
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ if (timed_out) {
+ pr_err("%s: cqhci: timeout for tag %d\n",
+ mmc_hostname(mmc), tag);
+ cqhci_dumpregs(cq_host);
+ }
+
+ return timed_out;
+}
+
+static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
+{
+ return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
+}
+
+static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ bool ret;
+ u32 ctl;
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_TCL);
+
+ ctl = cqhci_readl(cq_host, CQHCI_CTL);
+ ctl |= CQHCI_CLEAR_ALL_TASKS;
+ cqhci_writel(cq_host, ctl, CQHCI_CTL);
+
+ wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
+ msecs_to_jiffies(timeout) + 1);
+
+ cqhci_set_irqs(cq_host, 0);
+
+ ret = cqhci_tasks_cleared(cq_host);
+
+ if (!ret)
+ pr_debug("%s: cqhci: Failed to clear tasks\n",
+ mmc_hostname(mmc));
+
+ return ret;
+}
+
+static bool cqhci_halted(struct cqhci_host *cq_host)
+{
+ return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
+}
+
+static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ bool ret;
+ u32 ctl;
+
+ if (cqhci_halted(cq_host))
+ return true;
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_HAC);
+
+ ctl = cqhci_readl(cq_host, CQHCI_CTL);
+ ctl |= CQHCI_HALT;
+ cqhci_writel(cq_host, ctl, CQHCI_CTL);
+
+ wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
+ msecs_to_jiffies(timeout) + 1);
+
+ cqhci_set_irqs(cq_host, 0);
+
+ ret = cqhci_halted(cq_host);
+
+ if (!ret)
+ pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
+
+ return ret;
+}
+
+/*
+ * After halting we expect to be able to use the command line. We interpret a
+ * failure to halt to mean that the data lines might still be in use (and the upper
+ * layers will need to send a STOP command), so we set the timeout based on a
+ * generous command timeout.
+ */
+#define CQHCI_START_HALT_TIMEOUT 5
+
+static void cqhci_recovery_start(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
+
+ WARN_ON(!cq_host->recovery_halt);
+
+ cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);
+
+ if (cq_host->ops->disable)
+ cq_host->ops->disable(mmc, true);
+
+ mmc->cqe_on = false;
+}
+
+static int cqhci_error_from_flags(unsigned int flags)
+{
+ if (!flags)
+ return 0;
+
+ /* CRC errors might indicate re-tuning so prefer to report that */
+ if (flags & CQHCI_HOST_CRC)
+ return -EILSEQ;
+
+ if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
+ return -ETIMEDOUT;
+
+ return -EIO;
+}
+
+static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
+{
+ struct cqhci_slot *slot = &cq_host->slot[tag];
+ struct mmc_request *mrq = slot->mrq;
+ struct mmc_data *data;
+
+ if (!mrq)
+ return;
+
+ slot->mrq = NULL;
+
+ cq_host->qcnt -= 1;
+
+ data = mrq->data;
+ if (data) {
+ data->bytes_xfered = 0;
+ data->error = cqhci_error_from_flags(slot->flags);
+ } else {
+ mrq->cmd->error = cqhci_error_from_flags(slot->flags);
+ }
+
+ mmc_cqe_request_done(cq_host->mmc, mrq);
+}
+
+static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
+{
+ int i;
+
+ for (i = 0; i < cq_host->num_slots; i++)
+ cqhci_recover_mrq(cq_host, i);
+}
+
+/*
+ * By now the command and data lines should be unused so there is no reason for
+ * CQHCI to take a long time to halt, but if it doesn't halt there could be
+ * problems clearing tasks, so be generous.
+ */
+#define CQHCI_FINISH_HALT_TIMEOUT 20
+
+/* CQHCI could be expected to clear its internal state pretty quickly */
+#define CQHCI_CLEAR_TIMEOUT 20
+
+static void cqhci_recovery_finish(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ unsigned long flags;
+ u32 cqcfg;
+ bool ok;
+
+ pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
+
+ WARN_ON(!cq_host->recovery_halt);
+
+ ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+
+ if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+ ok = false;
+
+ /*
+ * The specification contradicts itself, by saying that tasks cannot be
+ * cleared if CQHCI does not halt, but if CQHCI does not halt, it should
+ * be disabled/re-enabled, but not to disable before clearing tasks.
+ * Have a go anyway.
+ */
+ if (!ok) {
+ pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ cqcfg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ cqcfg |= CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ /* Be sure that there are no tasks */
+ ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+ if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+ ok = false;
+ WARN_ON(!ok);
+ }
+
+ cqhci_recover_mrqs(cq_host);
+
+ WARN_ON(cq_host->qcnt);
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+ cq_host->qcnt = 0;
+ cq_host->recovery_halt = false;
+ mmc->cqe_on = false;
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ /* Ensure all writes are done before interrupts are re-enabled */
+ wmb();
+
+ cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
+
+ pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
+}
+
+static const struct mmc_cqe_ops cqhci_cqe_ops = {
+ .cqe_enable = cqhci_enable,
+ .cqe_disable = cqhci_disable,
+ .cqe_request = cqhci_request,
+ .cqe_post_req = cqhci_post_req,
+ .cqe_off = cqhci_off,
+ .cqe_wait_for_idle = cqhci_wait_for_idle,
+ .cqe_timeout = cqhci_timeout,
+ .cqe_recovery_start = cqhci_recovery_start,
+ .cqe_recovery_finish = cqhci_recovery_finish,
+};
+
+struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
+{
+ struct cqhci_host *cq_host;
+ struct resource *cqhci_memres = NULL;
+
+	/* check for and set up the CMDQ interface */
+ cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "cqhci_mem");
+ if (!cqhci_memres) {
+ dev_dbg(&pdev->dev, "CMDQ not supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host)
+ return ERR_PTR(-ENOMEM);
+ cq_host->mmio = devm_ioremap(&pdev->dev,
+ cqhci_memres->start,
+ resource_size(cqhci_memres));
+ if (!cq_host->mmio) {
+ dev_err(&pdev->dev, "failed to remap cqhci regs\n");
+ return ERR_PTR(-EBUSY);
+ }
+ dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");
+
+ return cq_host;
+}
+EXPORT_SYMBOL(cqhci_pltfm_init);
+
+static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
+{
+ return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
+}
+
+static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
+{
+ u32 ver = cqhci_readl(cq_host, CQHCI_VER);
+
+ return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
+}
+
+int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
+ bool dma64)
+{
+ int err;
+
+ cq_host->dma64 = dma64;
+ cq_host->mmc = mmc;
+ cq_host->mmc->cqe_private = cq_host;
+
+ cq_host->num_slots = NUM_SLOTS;
+ cq_host->dcmd_slot = DCMD_SLOT;
+
+ mmc->cqe_ops = &cqhci_cqe_ops;
+
+ mmc->cqe_qdepth = NUM_SLOTS;
+ if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
+ mmc->cqe_qdepth -= 1;
+
+ cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
+ sizeof(*cq_host->slot), GFP_KERNEL);
+ if (!cq_host->slot) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ spin_lock_init(&cq_host->lock);
+
+ init_completion(&cq_host->halt_comp);
+ init_waitqueue_head(&cq_host->wait_queue);
+
+ pr_info("%s: CQHCI version %u.%02u\n",
+ mmc_hostname(mmc), cqhci_ver_major(cq_host),
+ cqhci_ver_minor(cq_host));
+
+ return 0;
+
+out_err:
+ pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
+ mmc_hostname(mmc), cqhci_ver_major(cq_host),
+ cqhci_ver_minor(cq_host), err);
+ return err;
+}
+EXPORT_SYMBOL(cqhci_init);
+
+MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
+MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h
new file mode 100644
index 0000000..9e68286
--- /dev/null
+++ b/drivers/mmc/host/cqhci.h
@@ -0,0 +1,240 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef LINUX_MMC_CQHCI_H
+#define LINUX_MMC_CQHCI_H
+
+#include <linux/compiler.h>
+#include <linux/bitops.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+#include <linux/irqreturn.h>
+#include <asm/io.h>
+
+/* registers */
+/* version */
+#define CQHCI_VER 0x00
+#define CQHCI_VER_MAJOR(x) (((x) & GENMASK(11, 8)) >> 8)
+#define CQHCI_VER_MINOR1(x) (((x) & GENMASK(7, 4)) >> 4)
+#define CQHCI_VER_MINOR2(x) ((x) & GENMASK(3, 0))
+
+/* capabilities */
+#define CQHCI_CAP 0x04
+/* configuration */
+#define CQHCI_CFG 0x08
+#define CQHCI_DCMD 0x00001000
+#define CQHCI_TASK_DESC_SZ 0x00000100
+#define CQHCI_ENABLE 0x00000001
+
+/* control */
+#define CQHCI_CTL 0x0C
+#define CQHCI_CLEAR_ALL_TASKS 0x00000100
+#define CQHCI_HALT 0x00000001
+
+/* interrupt status */
+#define CQHCI_IS 0x10
+#define CQHCI_IS_HAC BIT(0)
+#define CQHCI_IS_TCC BIT(1)
+#define CQHCI_IS_RED BIT(2)
+#define CQHCI_IS_TCL BIT(3)
+
+#define CQHCI_IS_MASK (CQHCI_IS_TCC | CQHCI_IS_RED)
+
+/* interrupt status enable */
+#define CQHCI_ISTE 0x14
+
+/* interrupt signal enable */
+#define CQHCI_ISGE 0x18
+
+/* interrupt coalescing */
+#define CQHCI_IC 0x1C
+#define CQHCI_IC_ENABLE BIT(31)
+#define CQHCI_IC_RESET BIT(16)
+#define CQHCI_IC_ICCTHWEN BIT(15)
+#define CQHCI_IC_ICCTH(x) (((x) & 0x1F) << 8)
+#define CQHCI_IC_ICTOVALWEN BIT(7)
+#define CQHCI_IC_ICTOVAL(x) ((x) & 0x7F)
+
+/* task list base address */
+#define CQHCI_TDLBA 0x20
+
+/* task list base address upper */
+#define CQHCI_TDLBAU 0x24
+
+/* door-bell */
+#define CQHCI_TDBR 0x28
+
+/* task completion notification */
+#define CQHCI_TCN 0x2C
+
+/* device queue status */
+#define CQHCI_DQS 0x30
+
+/* device pending tasks */
+#define CQHCI_DPT 0x34
+
+/* task clear */
+#define CQHCI_TCLR 0x38
+
+/* send status config 1 */
+#define CQHCI_SSC1 0x40
+
+/* send status config 2 */
+#define CQHCI_SSC2 0x44
+
+/* response for dcmd */
+#define CQHCI_CRDCT 0x48
+
+/* response mode error mask */
+#define CQHCI_RMEM 0x50
+
+/* task error info */
+#define CQHCI_TERRI 0x54
+
+#define CQHCI_TERRI_C_INDEX(x) ((x) & GENMASK(5, 0))
+#define CQHCI_TERRI_C_TASK(x) (((x) & GENMASK(12, 8)) >> 8)
+#define CQHCI_TERRI_C_VALID(x) ((x) & BIT(15))
+#define CQHCI_TERRI_D_INDEX(x) (((x) & GENMASK(21, 16)) >> 16)
+#define CQHCI_TERRI_D_TASK(x) (((x) & GENMASK(28, 24)) >> 24)
+#define CQHCI_TERRI_D_VALID(x) ((x) & BIT(31))
+
+/* command response index */
+#define CQHCI_CRI 0x58
+
+/* command response argument */
+#define CQHCI_CRA 0x5C
+
+#define CQHCI_INT_ALL 0xF
+#define CQHCI_IC_DEFAULT_ICCTH 31
+#define CQHCI_IC_DEFAULT_ICTOVAL 1
+
+/* attribute fields */
+#define CQHCI_VALID(x) (((x) & 1) << 0)
+#define CQHCI_END(x) (((x) & 1) << 1)
+#define CQHCI_INT(x) (((x) & 1) << 2)
+#define CQHCI_ACT(x) (((x) & 0x7) << 3)
+
+/* data command task descriptor fields */
+#define CQHCI_FORCED_PROG(x) (((x) & 1) << 6)
+#define CQHCI_CONTEXT(x) (((x) & 0xF) << 7)
+#define CQHCI_DATA_TAG(x) (((x) & 1) << 11)
+#define CQHCI_DATA_DIR(x) (((x) & 1) << 12)
+#define CQHCI_PRIORITY(x) (((x) & 1) << 13)
+#define CQHCI_QBAR(x) (((x) & 1) << 14)
+#define CQHCI_REL_WRITE(x) (((x) & 1) << 15)
+#define CQHCI_BLK_COUNT(x) (((x) & 0xFFFF) << 16)
+#define CQHCI_BLK_ADDR(x) (((x) & 0xFFFFFFFF) << 32)
+
+/* direct command task descriptor fields */
+#define CQHCI_CMD_INDEX(x) (((x) & 0x3F) << 16)
+#define CQHCI_CMD_TIMING(x) (((x) & 1) << 22)
+#define CQHCI_RESP_TYPE(x) (((x) & 0x3) << 23)
+
+/* transfer descriptor fields */
+#define CQHCI_DAT_LENGTH(x) (((x) & 0xFFFF) << 16)
+#define CQHCI_DAT_ADDR_LO(x) (((x) & 0xFFFFFFFF) << 32)
+#define CQHCI_DAT_ADDR_HI(x) (((x) & 0xFFFFFFFF) << 0)
+
+struct cqhci_host_ops;
+struct mmc_host;
+struct cqhci_slot;
+
+struct cqhci_host {
+ const struct cqhci_host_ops *ops;
+ void __iomem *mmio;
+ struct mmc_host *mmc;
+
+ spinlock_t lock;
+
+ /* relative card address of device */
+ unsigned int rca;
+
+ /* 64 bit DMA */
+ bool dma64;
+ int num_slots;
+ int qcnt;
+
+ u32 dcmd_slot;
+ u32 caps;
+#define CQHCI_TASK_DESC_SZ_128 0x1
+
+ u32 quirks;
+#define CQHCI_QUIRK_SHORT_TXFR_DESC_SZ 0x1
+
+ bool enabled;
+ bool halted;
+ bool init_done;
+ bool activated;
+ bool waiting_for_idle;
+ bool recovery_halt;
+
+ size_t desc_size;
+ size_t data_size;
+
+ u8 *desc_base;
+
+	/* per-slot descriptor size: one task plus one link descriptor */
+ u8 slot_sz;
+
+ /* 64/128 bit depends on CQHCI_CFG */
+ u8 task_desc_len;
+
+ /* 64 bit on 32-bit arch, 128 bit on 64-bit */
+ u8 link_desc_len;
+
+ u8 *trans_desc_base;
+ /* same length as transfer descriptor */
+ u8 trans_desc_len;
+
+ dma_addr_t desc_dma_base;
+ dma_addr_t trans_desc_dma_base;
+
+ struct completion halt_comp;
+ wait_queue_head_t wait_queue;
+ struct cqhci_slot *slot;
+};
+
+struct cqhci_host_ops {
+ void (*dumpregs)(struct mmc_host *mmc);
+ void (*write_l)(struct cqhci_host *host, u32 val, int reg);
+ u32 (*read_l)(struct cqhci_host *host, int reg);
+ void (*enable)(struct mmc_host *mmc);
+ void (*disable)(struct mmc_host *mmc, bool recovery);
+};
+
+static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg)
+{
+ if (unlikely(host->ops->write_l))
+ host->ops->write_l(host, val, reg);
+ else
+ writel_relaxed(val, host->mmio + reg);
+}
+
+static inline u32 cqhci_readl(struct cqhci_host *host, int reg)
+{
+ if (unlikely(host->ops->read_l))
+ return host->ops->read_l(host, reg);
+ else
+ return readl_relaxed(host->mmio + reg);
+}
+
+struct platform_device;
+
+irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
+ int data_error);
+int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc, bool dma64);
+struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev);
+int cqhci_suspend(struct mmc_host *mmc);
+int cqhci_resume(struct mmc_host *mmc);
+
+#endif
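
The attribute macros above build descriptors by masking bounded field values
and shifting them into a 64-bit word, exactly as cqhci_prep_task_desc() does
in cqhci.c. A purely illustrative example packing a task descriptor for an
8-block read at block address 0x1000 (values chosen for demonstration only):

/* Valid, end of descriptor, interrupt on completion, ACT=0x5 (task
 * descriptor), read direction, 8 blocks at block address 0x1000. */
u64 data = CQHCI_VALID(1) |
	   CQHCI_END(1) |
	   CQHCI_INT(1) |
	   CQHCI_ACT(0x5) |
	   CQHCI_DATA_DIR(1) |		/* 1 = read, as for MMC_DATA_READ */
	   CQHCI_BLK_COUNT(8) |
	   CQHCI_BLK_ADDR(0x1000ULL);
/* data == 0x000010000008102f: block address in the upper 32 bits, block
 * count at bits 16-31, attribute bits in bits 0-5 and the direction
 * bit at bit 12. */
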
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 351330d..8e36317 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -174,7 +174,7 @@ module_param(poll_loopcount, uint, S_IRUGO);
MODULE_PARM_DESC(poll_loopcount,
"Maximum polling loop count. Default = 32");
-static unsigned __initdata use_dma = 1;
+static unsigned use_dma = 1;
module_param(use_dma, uint, 0);
MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");
@@ -496,8 +496,7 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
return ret;
}
-static void __init_or_module
-davinci_release_dma_channels(struct mmc_davinci_host *host)
+static void davinci_release_dma_channels(struct mmc_davinci_host *host)
{
if (!host->use_dma)
return;
@@ -506,7 +505,7 @@ davinci_release_dma_channels(struct mmc_davinci_host *host)
dma_release_channel(host->dma_rx);
}
-static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
+static int davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
if (IS_ERR(host->dma_tx)) {
@@ -1201,7 +1200,7 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
return 0;
}
-static int __init davinci_mmcsd_probe(struct platform_device *pdev)
+static int davinci_mmcsd_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct mmc_davinci_host *host = NULL;
@@ -1254,8 +1253,9 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
pdev->id_entry = match->data;
ret = mmc_of_parse(mmc);
if (ret) {
- dev_err(&pdev->dev,
- "could not parse of data: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "could not parse of data: %d\n", ret);
goto parse_fail;
}
} else {
@@ -1414,11 +1414,12 @@ static struct platform_driver davinci_mmcsd_driver = {
.pm = davinci_mmcsd_pm_ops,
.of_match_table = davinci_mmc_dt_ids,
},
+ .probe = davinci_mmcsd_probe,
.remove = __exit_p(davinci_mmcsd_remove),
.id_table = davinci_mmc_devtype,
};
-module_platform_driver_probe(davinci_mmcsd_driver, davinci_mmcsd_probe);
+module_platform_driver(davinci_mmcsd_driver);
MODULE_AUTHOR("Texas Instruments India");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index e0862d3..32a6a22 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1208,7 +1208,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (!irq) {
+ if (irq <= 0) {
dev_err(&pdev->dev, "failed to get interrupt resource.\n");
ret = -EINVAL;
goto free_host;
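
For context on the meson-gx one-liner above: platform_get_irq() returns a
negative errno on failure, and 0 is never a valid IRQ number for a device,
so the old "if (!irq)" test let real failures through. The conventional
shape of the check, as a generic sketch:

int irq = platform_get_irq(pdev, 0);

if (irq <= 0)	/* negative errno, or 0, which is not a valid IRQ */
	return irq < 0 ? irq : -EINVAL;
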
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index e8a1bb1..70b0df8 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -82,6 +82,10 @@ static unsigned int fmax = 515633;
* @qcom_fifo: enables qcom specific fifo pio read logic.
* @qcom_dml: enables qcom specific dma glue for dma transfers.
* @reversed_irq_handling: handle data irq before cmd irq.
+ * @mmcimask1: true if the variant has an MMCIMASK1 register.
+ * @start_err: bitmask identifying the STARTBITERR bit inside the
+ *	       MMCISTATUS register.
+ * @opendrain: bitmask identifying the OPENDRAIN bit inside the MMCIPOWER
+ *	       register.
*/
struct variant_data {
unsigned int clkreg;
@@ -111,6 +115,9 @@ struct variant_data {
bool qcom_fifo;
bool qcom_dml;
bool reversed_irq_handling;
+ bool mmcimask1;
+ u32 start_err;
+ u32 opendrain;
};
static struct variant_data variant_arm = {
@@ -120,6 +127,9 @@ static struct variant_data variant_arm = {
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
.reversed_irq_handling = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
static struct variant_data variant_arm_extended_fifo = {
@@ -128,6 +138,9 @@ static struct variant_data variant_arm_extended_fifo = {
.datalength_bits = 16,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
static struct variant_data variant_arm_extended_fifo_hwfc = {
@@ -137,6 +150,9 @@ static struct variant_data variant_arm_extended_fifo_hwfc = {
.datalength_bits = 16,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
static struct variant_data variant_u300 = {
@@ -152,6 +168,9 @@ static struct variant_data variant_u300 = {
.signal_direction = true,
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
};
static struct variant_data variant_nomadik = {
@@ -168,6 +187,9 @@ static struct variant_data variant_nomadik = {
.signal_direction = true,
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
};
static struct variant_data variant_ux500 = {
@@ -190,6 +212,9 @@ static struct variant_data variant_ux500 = {
.busy_detect_flag = MCI_ST_CARDBUSY,
.busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
};
static struct variant_data variant_ux500v2 = {
@@ -214,6 +239,26 @@ static struct variant_data variant_ux500v2 = {
.busy_detect_flag = MCI_ST_CARDBUSY,
.busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
+};
+
+static struct variant_data variant_stm32 = {
+ .fifosize = 32 * 4,
+ .fifohalfsize = 8 * 4,
+ .clkreg = MCI_CLK_ENABLE,
+ .clkreg_enable = MCI_ST_UX500_HWFCEN,
+ .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+ .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+ .datalength_bits = 24,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
+ .st_sdio = true,
+ .st_clkdiv = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .f_max = 48000000,
+ .pwrreg_clkgate = true,
+ .pwrreg_nopower = true,
};
static struct variant_data variant_qcom = {
@@ -232,6 +277,9 @@ static struct variant_data variant_qcom = {
.explicit_mclk_control = true,
.qcom_fifo = true,
.qcom_dml = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
/* Busy detection for the ST Micro variant */
@@ -396,6 +444,7 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
void __iomem *base = host->base;
+ struct variant_data *variant = host->variant;
if (host->singleirq) {
unsigned int mask0 = readl(base + MMCIMASK0);
@@ -406,7 +455,10 @@ static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
writel(mask0, base + MMCIMASK0);
}
- writel(mask, base + MMCIMASK1);
+ if (variant->mmcimask1)
+ writel(mask, base + MMCIMASK1);
+
+ host->mask1_reg = mask;
}
static void mmci_stop_data(struct mmci_host *host)
@@ -921,8 +973,9 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
return;
/* First check for errors */
- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
- MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+ if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
+ host->variant->start_err |
+ MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
u32 remain, success;
/* Terminate the DMA transfer */
@@ -1286,7 +1339,7 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
status = readl(host->base + MMCISTATUS);
if (host->singleirq) {
- if (status & readl(host->base + MMCIMASK1))
+ if (status & host->mask1_reg)
mmci_pio_irq(irq, dev_id);
status &= ~MCI_IRQ1MASK;
@@ -1429,16 +1482,18 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
~MCI_ST_DATA2DIREN);
}
- if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
- if (host->hw_designer != AMBA_VENDOR_ST)
- pwr |= MCI_ROD;
- else {
- /*
- * The ST Micro variant use the ROD bit for something
- * else and only has OD (Open Drain).
- */
- pwr |= MCI_OD;
- }
+ if (variant->opendrain) {
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ pwr |= variant->opendrain;
+ } else {
+ /*
+			 * If the variant cannot configure the pads on its own, we
+			 * expect pinctrl to be able to do that for us.
+ */
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ pinctrl_select_state(host->pinctrl, host->pins_opendrain);
+ else
+ pinctrl_select_state(host->pinctrl, host->pins_default);
}
/*
@@ -1583,6 +1638,35 @@ static int mmci_probe(struct amba_device *dev,
host = mmc_priv(mmc);
host->mmc = mmc;
+ /*
+	 * Some variants (e.g. STM32) don't have an opendrain bit; the pins
+	 * can nevertheless be configured accordingly using pinctrl.
+ */
+ if (!variant->opendrain) {
+ host->pinctrl = devm_pinctrl_get(&dev->dev);
+ if (IS_ERR(host->pinctrl)) {
+ dev_err(&dev->dev, "failed to get pinctrl");
+ ret = PTR_ERR(host->pinctrl);
+ goto host_free;
+ }
+
+ host->pins_default = pinctrl_lookup_state(host->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(host->pins_default)) {
+ dev_err(mmc_dev(mmc), "Can't select default pins\n");
+ ret = PTR_ERR(host->pins_default);
+ goto host_free;
+ }
+
+ host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
+ MMCI_PINCTRL_STATE_OPENDRAIN);
+ if (IS_ERR(host->pins_opendrain)) {
+ dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
+ ret = PTR_ERR(host->pins_opendrain);
+ goto host_free;
+ }
+ }
+
host->hw_designer = amba_manf(dev);
host->hw_revision = amba_rev(dev);
dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
@@ -1729,7 +1813,10 @@ static int mmci_probe(struct amba_device *dev,
spin_lock_init(&host->lock);
writel(0, host->base + MMCIMASK0);
- writel(0, host->base + MMCIMASK1);
+
+ if (variant->mmcimask1)
+ writel(0, host->base + MMCIMASK1);
+
writel(0xfff, host->base + MMCICLEAR);
/*
@@ -1809,6 +1896,7 @@ static int mmci_remove(struct amba_device *dev)
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
+ struct variant_data *variant = host->variant;
/*
* Undo pm_runtime_put() in probe. We use the _sync
@@ -1819,7 +1907,9 @@ static int mmci_remove(struct amba_device *dev)
mmc_remove_host(mmc);
writel(0, host->base + MMCIMASK0);
- writel(0, host->base + MMCIMASK1);
+
+ if (variant->mmcimask1)
+ writel(0, host->base + MMCIMASK1);
writel(0, host->base + MMCICOMMAND);
writel(0, host->base + MMCIDATACTRL);
@@ -1951,6 +2041,11 @@ static const struct amba_id mmci_ids[] = {
.mask = 0xf0ffffff,
.data = &variant_ux500v2,
},
+ {
+ .id = 0x00880180,
+ .mask = 0x00ffffff,
+ .data = &variant_stm32,
+ },
/* Qualcomm variants */
{
.id = 0x00051180,
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 4a8bef1..f91cdf7 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -192,6 +192,8 @@
#define NR_SG 128
+#define MMCI_PINCTRL_STATE_OPENDRAIN "opendrain"
+
struct clk;
struct variant_data;
struct dma_chan;
@@ -223,9 +225,13 @@ struct mmci_host {
u32 clk_reg;
u32 datactrl_reg;
u32 busy_status;
+ u32 mask1_reg;
bool vqmmc_enabled;
struct mmci_platform_data *plat;
struct variant_data *variant;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_opendrain;
u8 hw_designer;
u8 hw_revision:4;
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index b9dfea5..f13f798 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -35,6 +35,28 @@ struct renesas_sdhi_of_data {
unsigned short max_segs;
};
+struct tmio_mmc_dma {
+ enum dma_slave_buswidth dma_buswidth;
+ bool (*filter)(struct dma_chan *chan, void *arg);
+ void (*enable)(struct tmio_mmc_host *host, bool enable);
+ struct completion dma_dataend;
+ struct tasklet_struct dma_complete;
+};
+
+struct renesas_sdhi {
+ struct clk *clk;
+ struct clk *clk_cd;
+ struct tmio_mmc_data mmc_data;
+ struct tmio_mmc_dma dma_priv;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default, *pins_uhs;
+ void __iomem *scc_ctl;
+ u32 scc_tappos;
+};
+
+#define host_to_priv(host) \
+ container_of((host)->pdata, struct renesas_sdhi, mmc_data)
+
int renesas_sdhi_probe(struct platform_device *pdev,
const struct tmio_mmc_dma_ops *dma_ops);
int renesas_sdhi_remove(struct platform_device *pdev);
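
Moving struct renesas_sdhi into this header lets both DMA back-ends recover
their private state from a struct tmio_mmc_host, because host_to_priv() is
just container_of() over the embedded mmc_data member. Schematically, using
the names defined in this header:

struct renesas_sdhi *priv = host_to_priv(host);

/* ...which expands to pointer arithmetic on the embedded member: */
struct renesas_sdhi *priv2 =
	container_of(host->pdata, struct renesas_sdhi, mmc_data);

/* This is valid only because the probe path points host->pdata at the
 * mmc_data field that lives inside a struct renesas_sdhi. */
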
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index fcf7235..80943fa 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/mmc/host.h>
@@ -46,19 +47,6 @@
#define SDHI_VER_GEN3_SD 0xcc10
#define SDHI_VER_GEN3_SDMMC 0xcd10
-#define host_to_priv(host) \
- container_of((host)->pdata, struct renesas_sdhi, mmc_data)
-
-struct renesas_sdhi {
- struct clk *clk;
- struct clk *clk_cd;
- struct tmio_mmc_data mmc_data;
- struct tmio_mmc_dma dma_priv;
- struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default, *pins_uhs;
- void __iomem *scc_ctl;
-};
-
static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
{
u32 val;
@@ -280,7 +268,7 @@ static unsigned int renesas_sdhi_init_tuning(struct tmio_mmc_host *host)
~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
- sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, host->scc_tappos);
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, priv->scc_tappos);
/* Read TAPNUM */
return (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL) >>
@@ -497,7 +485,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (IS_ERR(priv->clk)) {
ret = PTR_ERR(priv->clk);
dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
- goto eprobe;
+ return ret;
}
/*
@@ -523,11 +511,9 @@ int renesas_sdhi_probe(struct platform_device *pdev,
"state_uhs");
}
- host = tmio_mmc_host_alloc(pdev);
- if (!host) {
- ret = -ENOMEM;
- goto eprobe;
- }
+ host = tmio_mmc_host_alloc(pdev, mmc_data);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
if (of_data) {
mmc_data->flags |= of_data->tmio_flags;
@@ -541,18 +527,18 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->bus_shift = of_data->bus_shift;
}
- host->dma = dma_priv;
host->write16_hook = renesas_sdhi_write16_hook;
host->clk_enable = renesas_sdhi_clk_enable;
host->clk_update = renesas_sdhi_clk_update;
host->clk_disable = renesas_sdhi_clk_disable;
host->multi_io_quirk = renesas_sdhi_multi_io_quirk;
+ host->dma_ops = dma_ops;
/* SDR speeds are only available on Gen2+ */
if (mmc_data->flags & TMIO_MMC_MIN_RCAR2) {
/* card_busy caused issues on r8a73a4 (pre-Gen2) CD-less SDHI */
- host->card_busy = renesas_sdhi_card_busy;
- host->start_signal_voltage_switch =
+ host->ops.card_busy = renesas_sdhi_card_busy;
+ host->ops.start_signal_voltage_switch =
renesas_sdhi_start_signal_voltage_switch;
}
@@ -586,10 +572,14 @@ int renesas_sdhi_probe(struct platform_device *pdev,
/* All SDHI have SDIO status bits which must be 1 */
mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS;
- ret = tmio_mmc_host_probe(host, mmc_data, dma_ops);
- if (ret < 0)
+ ret = renesas_sdhi_clk_enable(host);
+ if (ret)
goto efree;
+ ret = tmio_mmc_host_probe(host);
+ if (ret < 0)
+ goto edisclk;
+
/* One Gen2 SDHI incarnation does NOT have a CBSY bit */
if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50)
mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY;
@@ -606,7 +596,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
for (i = 0; i < of_data->taps_num; i++) {
if (taps[i].clk_rate == 0 ||
taps[i].clk_rate == host->mmc->f_max) {
- host->scc_tappos = taps->tap;
+ priv->scc_tappos = taps->tap;
hit = true;
break;
}
@@ -650,20 +640,24 @@ int renesas_sdhi_probe(struct platform_device *pdev,
eirq:
tmio_mmc_host_remove(host);
+edisclk:
+ renesas_sdhi_clk_disable(host);
efree:
tmio_mmc_host_free(host);
-eprobe:
+
return ret;
}
EXPORT_SYMBOL_GPL(renesas_sdhi_probe);
int renesas_sdhi_remove(struct platform_device *pdev)
{
- struct mmc_host *mmc = platform_get_drvdata(pdev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_host *host = platform_get_drvdata(pdev);
tmio_mmc_host_remove(host);
+ renesas_sdhi_clk_disable(host);
return 0;
}
EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 41cbe84..7c03cfe 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -103,6 +103,8 @@ renesas_sdhi_internal_dmac_dm_write(struct tmio_mmc_host *host,
static void
renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
if (!host->chan_tx || !host->chan_rx)
return;
@@ -110,8 +112,8 @@ renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable)
renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1,
INFO1_CLEAR);
- if (host->dma->enable)
- host->dma->enable(host, enable);
+ if (priv->dma_priv.enable)
+ priv->dma_priv.enable(host, enable);
}
static void
@@ -130,7 +132,9 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) {
static void
renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host) {
- tasklet_schedule(&host->dma_complete);
+ struct renesas_sdhi *priv = host_to_priv(host);
+
+ tasklet_schedule(&priv->dma_priv.dma_complete);
}
static void
@@ -220,10 +224,12 @@ static void
renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
+	/* Set the channel pointers to a non-zero dummy value to mark DMA as enabled */
host->chan_rx = host->chan_tx = (void *)0xdeadbeaf;
- tasklet_init(&host->dma_complete,
+ tasklet_init(&priv->dma_priv.dma_complete,
renesas_sdhi_internal_dmac_complete_tasklet_fn,
(unsigned long)host);
tasklet_init(&host->dma_issue,
@@ -255,6 +261,7 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = {
{ .soc_id = "r8a7795", .revision = "ES1.*" },
{ .soc_id = "r8a7795", .revision = "ES2.0" },
{ .soc_id = "r8a7796", .revision = "ES1.0" },
+ { .soc_id = "r8a77995", .revision = "ES1.0" },
{ /* sentinel */ }
};
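
The whitelist entry added above gates the internal DMAC on known-good SoC
revisions. The table itself carries no logic; elsewhere in this file
(outside the hunk) it is consulted with soc_device_match(), roughly:

	/* Sketch: refuse to bind the internal DMAC on unlisted SoCs. */
	if (!soc_device_match(gen3_soc_whitelist))
		return -ENODEV;
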
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 9ab1043..82d757c 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -117,11 +117,13 @@ MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match);
static void renesas_sdhi_sys_dmac_enable_dma(struct tmio_mmc_host *host,
bool enable)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
if (!host->chan_tx || !host->chan_rx)
return;
- if (host->dma->enable)
- host->dma->enable(host, enable);
+ if (priv->dma_priv.enable)
+ priv->dma_priv.enable(host, enable);
}
static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
@@ -138,12 +140,15 @@ static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
static void renesas_sdhi_sys_dmac_dataend_dma(struct tmio_mmc_host *host)
{
- complete(&host->dma_dataend);
+ struct renesas_sdhi *priv = host_to_priv(host);
+
+ complete(&priv->dma_priv.dma_dataend);
}
static void renesas_sdhi_sys_dmac_dma_callback(void *arg)
{
struct tmio_mmc_host *host = arg;
+ struct renesas_sdhi *priv = host_to_priv(host);
spin_lock_irq(&host->lock);
@@ -161,7 +166,7 @@ static void renesas_sdhi_sys_dmac_dma_callback(void *arg)
spin_unlock_irq(&host->lock);
- wait_for_completion(&host->dma_dataend);
+ wait_for_completion(&priv->dma_priv.dma_dataend);
spin_lock_irq(&host->lock);
tmio_mmc_do_data_irq(host);
@@ -171,6 +176,7 @@ out:
static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_rx;
@@ -214,7 +220,7 @@ static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
DMA_CTRL_ACK);
if (desc) {
- reinit_completion(&host->dma_dataend);
+ reinit_completion(&priv->dma_priv.dma_dataend);
desc->callback = renesas_sdhi_sys_dmac_dma_callback;
desc->callback_param = host;
@@ -245,6 +251,7 @@ pio:
static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_tx;
@@ -293,7 +300,7 @@ static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
DMA_CTRL_ACK);
if (desc) {
- reinit_completion(&host->dma_dataend);
+ reinit_completion(&priv->dma_priv.dma_dataend);
desc->callback = renesas_sdhi_sys_dmac_dma_callback;
desc->callback_param = host;
@@ -341,7 +348,7 @@ static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv)
spin_lock_irq(&host->lock);
- if (host && host->data) {
+ if (host->data) {
if (host->data->flags & MMC_DATA_READ)
chan = host->chan_rx;
else
@@ -359,9 +366,11 @@ static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv)
static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
/* We can only either use DMA for both Tx and Rx or not use it at all */
- if (!host->dma || (!host->pdev->dev.of_node &&
- (!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
+ if (!host->pdev->dev.of_node &&
+ (!pdata->chan_priv_tx || !pdata->chan_priv_rx))
return;
if (!host->chan_tx && !host->chan_rx) {
@@ -378,7 +387,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
dma_cap_set(DMA_SLAVE, mask);
host->chan_tx = dma_request_slave_channel_compat(mask,
- host->dma->filter, pdata->chan_priv_tx,
+ priv->dma_priv.filter, pdata->chan_priv_tx,
&host->pdev->dev, "tx");
dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
host->chan_tx);
@@ -389,7 +398,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
cfg.direction = DMA_MEM_TO_DEV;
cfg.dst_addr = res->start +
(CTL_SD_DATA_PORT << host->bus_shift);
- cfg.dst_addr_width = host->dma->dma_buswidth;
+ cfg.dst_addr_width = priv->dma_priv.dma_buswidth;
if (!cfg.dst_addr_width)
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
cfg.src_addr = 0;
@@ -398,7 +407,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
goto ecfgtx;
host->chan_rx = dma_request_slave_channel_compat(mask,
- host->dma->filter, pdata->chan_priv_rx,
+ priv->dma_priv.filter, pdata->chan_priv_rx,
&host->pdev->dev, "rx");
dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
host->chan_rx);
@@ -408,7 +417,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
cfg.direction = DMA_DEV_TO_MEM;
cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
- cfg.src_addr_width = host->dma->dma_buswidth;
+ cfg.src_addr_width = priv->dma_priv.dma_buswidth;
if (!cfg.src_addr_width)
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
cfg.dst_addr = 0;
@@ -420,7 +429,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
if (!host->bounce_buf)
goto ebouncebuf;
- init_completion(&host->dma_dataend);
+ init_completion(&priv->dma_priv.dma_dataend);
tasklet_init(&host->dma_issue,
renesas_sdhi_sys_dmac_issue_tasklet_fn,
(unsigned long)host);
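
Both DMAC backends now recover their private state through host_to_priv()
instead of the removed host->dma pointer. The helper lives in
renesas_sdhi.h, which this diff does not touch; assuming the usual
container_of() layout there, it amounts to:

	static inline struct renesas_sdhi *host_to_priv(struct tmio_mmc_host *host)
	{
		/* pdata points at the mmc_data member embedded in the priv struct */
		return container_of(host->pdata, struct renesas_sdhi, mmc_data);
	}
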
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 0848dc0..30bd808 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -30,7 +30,7 @@
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/card.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include <asm/unaligned.h>
struct realtek_pci_sdmmc {
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 76da168..7842207 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -31,7 +31,7 @@
#include <linux/scatterlist.h>
#include <linux/pm_runtime.h>
-#include <linux/mfd/rtsx_usb.h>
+#include <linux/rtsx_usb.h>
#include <asm/unaligned.h>
#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index f7f157a..f774936 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1424,7 +1424,9 @@ static const struct file_operations s3cmci_fops_state = {
struct s3cmci_reg {
unsigned short addr;
unsigned char *name;
-} debug_regs[] = {
+};
+
+static const struct s3cmci_reg debug_regs[] = {
DBG_REG(CON),
DBG_REG(PRE),
DBG_REG(CMDARG),
@@ -1446,7 +1448,7 @@ struct s3cmci_reg {
static int s3cmci_regs_show(struct seq_file *seq, void *v)
{
struct s3cmci_host *host = seq->private;
- struct s3cmci_reg *rptr = debug_regs;
+ const struct s3cmci_reg *rptr = debug_regs;
for (; rptr->name; rptr++)
seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name,
@@ -1658,7 +1660,7 @@ static int s3cmci_probe(struct platform_device *pdev)
}
host->irq = platform_get_irq(pdev, 0);
- if (host->irq == 0) {
+ if (host->irq <= 0) {
dev_err(&pdev->dev, "failed to get interrupt resource.\n");
ret = -EINVAL;
goto probe_iounmap;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index b988997..4065da5 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -76,6 +76,7 @@ struct sdhci_acpi_slot {
size_t priv_size;
int (*probe_slot)(struct platform_device *, const char *, const char *);
int (*remove_slot)(struct platform_device *);
+ int (*setup_host)(struct platform_device *pdev);
};
struct sdhci_acpi_host {
@@ -96,14 +97,21 @@ static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
return c->slot && (c->slot->flags & flag);
}
+#define INTEL_DSM_HS_CAPS_SDR25 BIT(0)
+#define INTEL_DSM_HS_CAPS_DDR50 BIT(1)
+#define INTEL_DSM_HS_CAPS_SDR50 BIT(2)
+#define INTEL_DSM_HS_CAPS_SDR104 BIT(3)
+
enum {
INTEL_DSM_FNS = 0,
INTEL_DSM_V18_SWITCH = 3,
INTEL_DSM_V33_SWITCH = 4,
+ INTEL_DSM_HS_CAPS = 8,
};
struct intel_host {
u32 dsm_fns;
+ u32 hs_caps;
};
static const guid_t intel_dsm_guid =
@@ -152,6 +160,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
{
int err;
+ intel_host->hs_caps = ~0;
+
err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
if (err) {
pr_debug("%s: DSM not supported, error %d\n",
@@ -161,6 +171,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
pr_debug("%s: DSM function mask %#x\n",
mmc_hostname(mmc), intel_host->dsm_fns);
+
+ intel_dsm(intel_host, dev, INTEL_DSM_HS_CAPS, &intel_host->hs_caps);
}
static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
@@ -398,6 +410,26 @@ static int intel_probe_slot(struct platform_device *pdev, const char *hid,
return 0;
}
+static int intel_setup_host(struct platform_device *pdev)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct intel_host *intel_host = sdhci_acpi_priv(c);
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR25))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_SDR25;
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR50))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_SDR50;
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_DDR50))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_DDR50;
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR104))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_SDR104;
+
+ return 0;
+}
+
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
.chip = &sdhci_acpi_chip_int,
.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
@@ -409,6 +441,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
SDHCI_QUIRK2_STOP_WITH_TC |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
.probe_slot = intel_probe_slot,
+ .setup_host = intel_setup_host,
.priv_size = sizeof(struct intel_host),
};
@@ -421,6 +454,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
.flags = SDHCI_ACPI_RUNTIME_PM,
.pm_caps = MMC_PM_KEEP_POWER,
.probe_slot = intel_probe_slot,
+ .setup_host = intel_setup_host,
.priv_size = sizeof(struct intel_host),
};
@@ -432,6 +466,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
SDHCI_QUIRK2_STOP_WITH_TC,
.caps = MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM,
.probe_slot = intel_probe_slot,
+ .setup_host = intel_setup_host,
.priv_size = sizeof(struct intel_host),
};
@@ -446,6 +481,83 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = {
.caps = MMC_CAP_NONREMOVABLE,
};
+/* AMD SDHCI DLL reset register. */
+#define SDHCI_AMD_RESET_DLL_REGISTER 0x908
+
+static int amd_select_drive_strength(struct mmc_card *card,
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type)
+{
+ return MMC_SET_DRIVER_TYPE_A;
+}
+
+static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host)
+{
+	/* AMD platforms require this DLL reset sequence */
+ sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER);
+ usleep_range(10, 20);
+ sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER);
+}
+
+/*
+ * On AMD platforms the controller's tuning bit must first be
+ * disabled to bring it from HS200 back down to HS mode, and
+ * later re-enabled to tune up to HS400 mode.
+ */
+static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned int old_timing = host->timing;
+
+ sdhci_set_ios(mmc, ios);
+ if (old_timing == MMC_TIMING_MMC_HS200 &&
+ ios->timing == MMC_TIMING_MMC_HS)
+ sdhci_writew(host, 0x9, SDHCI_HOST_CONTROL2);
+ if (old_timing != MMC_TIMING_MMC_HS400 &&
+ ios->timing == MMC_TIMING_MMC_HS400) {
+ sdhci_writew(host, 0x80, SDHCI_HOST_CONTROL2);
+ sdhci_acpi_amd_hs400_dll(host);
+ }
+}
+
+static const struct sdhci_ops sdhci_acpi_ops_amd = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = {
+ .ops = &sdhci_acpi_ops_amd,
+};
+
+static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
+ const char *hid, const char *uid)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct sdhci_host *host = c->host;
+
+ sdhci_read_caps(host);
+ if (host->caps1 & SDHCI_SUPPORT_DDR50)
+ host->mmc->caps = MMC_CAP_1_8V_DDR;
+
+ if ((host->caps1 & SDHCI_SUPPORT_SDR104) &&
+ (host->mmc->caps & MMC_CAP_1_8V_DDR))
+ host->mmc->caps2 = MMC_CAP2_HS400_1_8V;
+
+ host->mmc_host_ops.select_drive_strength = amd_select_drive_strength;
+ host->mmc_host_ops.set_ios = amd_set_ios;
+ return 0;
+}
+
+static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
+ .chip = &sdhci_acpi_chip_amd,
+ .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE |
+ SDHCI_QUIRK_32BIT_ADMA_SIZE,
+ .probe_slot = sdhci_acpi_emmc_amd_probe_slot,
+};
+
struct sdhci_acpi_uid_slot {
const char *hid;
const char *uid;
@@ -469,6 +581,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
{ "PNP0D40" },
{ "QCOM8051", NULL, &sdhci_acpi_slot_qcom_sd_3v },
{ "QCOM8052", NULL, &sdhci_acpi_slot_qcom_sd },
+ { "AMDI0040", NULL, &sdhci_acpi_slot_amd_emmc },
{ },
};
@@ -485,6 +598,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
{ "PNP0D40" },
{ "QCOM8051" },
{ "QCOM8052" },
+ { "AMDI0040" },
{ },
};
MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
@@ -566,6 +680,10 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
host->hw_name = "ACPI";
host->ops = &sdhci_acpi_ops_dflt;
host->irq = platform_get_irq(pdev, 0);
+ if (host->irq <= 0) {
+ err = -EINVAL;
+ goto err_free;
+ }
host->ioaddr = devm_ioremap_nocache(dev, iomem->start,
resource_size(iomem));
@@ -609,10 +727,20 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
}
}
- err = sdhci_add_host(host);
+ err = sdhci_setup_host(host);
if (err)
goto err_free;
+ if (c->slot && c->slot->setup_host) {
+ err = c->slot->setup_host(pdev);
+ if (err)
+ goto err_cleanup;
+ }
+
+ err = __sdhci_add_host(host);
+ if (err)
+ goto err_cleanup;
+
if (c->use_runtime_pm) {
pm_runtime_set_active(dev);
pm_suspend_ignore_children(dev, 1);
@@ -625,6 +753,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
return 0;
+err_cleanup:
+ sdhci_cleanup_host(c->host);
err_free:
sdhci_free_host(c->host);
return err;
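
Splitting sdhci_add_host() into sdhci_setup_host() plus __sdhci_add_host()
is what makes the new ->setup_host() hook useful: capabilities are read in
the first phase, the slot hook may then trim them (intel_setup_host() masks
UHS modes against the DSM hs_caps value, which defaults to ~0 so nothing is
trimmed when the DSM is absent), and only afterwards is the host
registered. Condensed shape of the probe path above:

	err = sdhci_setup_host(host);			/* phase 1: read caps */
	if (err)
		goto err_free;

	if (c->slot && c->slot->setup_host) {
		err = c->slot->setup_host(pdev);	/* adjust caps in between */
		if (err)
			goto err_cleanup;
	}

	err = __sdhci_add_host(host);			/* phase 2: register host */
	if (err)
		goto err_cleanup;
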
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 85140c9..cd2b5f6 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -193,6 +193,7 @@ struct pltfm_imx_data {
struct clk *clk_ipg;
struct clk *clk_ahb;
struct clk *clk_per;
+ unsigned int actual_clock;
enum {
NO_CMD_PENDING, /* no multiblock command pending */
MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
@@ -687,6 +688,20 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
return;
}
+	/* For i.MX53 eSDHCv3, SYSCTL.SDCLKFS must not be set to 0. */
+ if (is_imx53_esdhc(imx_data)) {
+ /*
+ * According to the i.MX53 reference manual, if DLLCTRL[10] can
+ * be set, then the controller is eSDHCv3, else it is eSDHCv2.
+ */
+ val = readl(host->ioaddr + ESDHC_DLL_CTRL);
+ writel(val | BIT(10), host->ioaddr + ESDHC_DLL_CTRL);
+ temp = readl(host->ioaddr + ESDHC_DLL_CTRL);
+ writel(val, host->ioaddr + ESDHC_DLL_CTRL);
+ if (temp & BIT(10))
+ pre_div = 2;
+ }
+
temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
| ESDHC_CLOCK_MASK);
@@ -1389,11 +1404,15 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
int ret;
ret = sdhci_runtime_suspend_host(host);
+ if (ret)
+ return ret;
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
if (!sdhci_sdio_irq_enabled(host)) {
+ imx_data->actual_clock = host->mmc->actual_clock;
+ esdhc_pltfm_set_clock(host, 0);
clk_disable_unprepare(imx_data->clk_per);
clk_disable_unprepare(imx_data->clk_ipg);
}
@@ -1409,31 +1428,34 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
int err;
+ err = clk_prepare_enable(imx_data->clk_ahb);
+ if (err)
+ return err;
+
if (!sdhci_sdio_irq_enabled(host)) {
err = clk_prepare_enable(imx_data->clk_per);
if (err)
- return err;
+ goto disable_ahb_clk;
err = clk_prepare_enable(imx_data->clk_ipg);
if (err)
goto disable_per_clk;
+ esdhc_pltfm_set_clock(host, imx_data->actual_clock);
}
- err = clk_prepare_enable(imx_data->clk_ahb);
- if (err)
- goto disable_ipg_clk;
+
err = sdhci_runtime_resume_host(host);
if (err)
- goto disable_ahb_clk;
+ goto disable_ipg_clk;
return 0;
-disable_ahb_clk:
- clk_disable_unprepare(imx_data->clk_ahb);
disable_ipg_clk:
if (!sdhci_sdio_irq_enabled(host))
clk_disable_unprepare(imx_data->clk_ipg);
disable_per_clk:
if (!sdhci_sdio_irq_enabled(host))
clk_disable_unprepare(imx_data->clk_per);
+disable_ahb_clk:
+ clk_disable_unprepare(imx_data->clk_ahb);
return err;
}
#endif
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 0720ea7..c33a5f7 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -25,11 +25,13 @@
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
-#include "sdhci-pltfm.h"
#include <linux/of.h>
-#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
+#include "cqhci.h"
+#include "sdhci-pltfm.h"
+#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
+#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200
#define VENDOR_ENHANCED_STROBE BIT(0)
#define PHY_CLK_TOO_SLOW_HZ 400000
@@ -90,6 +92,7 @@ struct sdhci_arasan_data {
struct phy *phy;
bool is_phy_on;
+ bool has_cqe;
struct clk_hw sdcardclk_hw;
struct clk *sdcardclk;
@@ -262,6 +265,17 @@ static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
return -EINVAL;
}
+static void sdhci_arasan_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ if (!IS_ERR(host->mmc->supply.vmmc)) {
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ }
+ sdhci_set_power_noreg(host, mode, vdd);
+}
+
static const struct sdhci_ops sdhci_arasan_ops = {
.set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -269,6 +283,7 @@ static const struct sdhci_ops sdhci_arasan_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_arasan_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_power = sdhci_arasan_set_power,
};
static const struct sdhci_pltfm_data sdhci_arasan_pdata = {
@@ -278,6 +293,62 @@ static const struct sdhci_pltfm_data sdhci_arasan_pdata = {
SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
};
+static u32 sdhci_arasan_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+static void sdhci_arasan_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static void sdhci_arasan_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 reg;
+
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ while (reg & SDHCI_DATA_AVAILABLE) {
+ sdhci_readl(host, SDHCI_BUFFER);
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ }
+
+ sdhci_cqe_enable(mmc);
+}
+
+static const struct cqhci_host_ops sdhci_arasan_cqhci_ops = {
+ .enable = sdhci_arasan_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_arasan_dumpregs,
+};
+
+static const struct sdhci_ops sdhci_arasan_cqe_ops = {
+ .set_clock = sdhci_arasan_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_arasan_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_power = sdhci_arasan_set_power,
+ .irq = sdhci_arasan_cqhci_irq,
+};
+
+static const struct sdhci_pltfm_data sdhci_arasan_cqe_pdata = {
+ .ops = &sdhci_arasan_cqe_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
+};
+
#ifdef CONFIG_PM_SLEEP
/**
* sdhci_arasan_suspend - Suspend method for the driver
@@ -297,6 +368,12 @@ static int sdhci_arasan_suspend(struct device *dev)
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
+ if (sdhci_arasan->has_cqe) {
+ ret = cqhci_suspend(host->mmc);
+ if (ret)
+ return ret;
+ }
+
ret = sdhci_suspend_host(host);
if (ret)
return ret;
@@ -353,7 +430,16 @@ static int sdhci_arasan_resume(struct device *dev)
sdhci_arasan->is_phy_on = true;
}
- return sdhci_resume_host(host);
+ ret = sdhci_resume_host(host);
+ if (ret) {
+ dev_err(dev, "Cannot resume host.\n");
+ return ret;
+ }
+
+ if (sdhci_arasan->has_cqe)
+ return cqhci_resume(host->mmc);
+
+ return 0;
}
#endif /* ! CONFIG_PM_SLEEP */
@@ -556,6 +642,49 @@ static void sdhci_arasan_unregister_sdclk(struct device *dev)
of_clk_del_provider(dev->of_node);
}
+static int sdhci_arasan_add_host(struct sdhci_arasan_data *sdhci_arasan)
+{
+ struct sdhci_host *host = sdhci_arasan->host;
+ struct cqhci_host *cq_host;
+ bool dma64;
+ int ret;
+
+ if (!sdhci_arasan->has_cqe)
+ return sdhci_add_host(host);
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = devm_kzalloc(host->mmc->parent,
+ sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ cq_host->mmio = host->ioaddr + SDHCI_ARASAN_CQE_BASE_ADDR;
+ cq_host->ops = &sdhci_arasan_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ if (dma64)
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret)
+ goto cleanup;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ sdhci_cleanup_host(host);
+ return ret;
+}
+
static int sdhci_arasan_probe(struct platform_device *pdev)
{
int ret;
@@ -566,9 +695,15 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_arasan_data *sdhci_arasan;
struct device_node *np = pdev->dev.of_node;
+ const struct sdhci_pltfm_data *pdata;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "arasan,sdhci-5.1"))
+ pdata = &sdhci_arasan_cqe_pdata;
+ else
+ pdata = &sdhci_arasan_pdata;
+
+ host = sdhci_pltfm_init(pdev, pdata, sizeof(*sdhci_arasan));
- host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata,
- sizeof(*sdhci_arasan));
if (IS_ERR(host))
return PTR_ERR(host);
@@ -663,9 +798,11 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
sdhci_arasan_hs400_enhanced_strobe;
host->mmc_host_ops.start_signal_voltage_switch =
sdhci_arasan_voltage_switch;
+ sdhci_arasan->has_cqe = true;
+ host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
}
- ret = sdhci_add_host(host);
+ ret = sdhci_arasan_add_host(sdhci_arasan);
if (ret)
goto err_add_host;
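
The CQE interrupt glue added here follows the same pattern as the Intel
GLK code later in this series: sdhci_cqe_irq() decides whether the
interrupt belongs to the command queue engine, and only then is it
forwarded to cqhci_irq(). Distilled, with an illustrative name:

	static u32 example_cqhci_irq(struct sdhci_host *host, u32 intmask)
	{
		int cmd_error = 0;
		int data_error = 0;

		/* Not CQE-related: hand intmask back for normal SDHCI handling. */
		if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
			return intmask;

		cqhci_irq(host->mmc, intmask, cmd_error, data_error);
		return 0;	/* fully consumed by CQHCI */
	}
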
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 1f42437..4ffa6b1 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -589,10 +589,18 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
+ u32 val;
+
sdhci_reset(host, mask);
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+
+ if (mask & SDHCI_RESET_ALL) {
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~ESDHC_TB_EN;
+ sdhci_writel(host, val, ESDHC_TBCTL);
+ }
}
/* The SCFG, Supplemental Configuration Unit, provides SoC specific
diff --git a/drivers/mmc/host/sdhci-pci-arasan.c b/drivers/mmc/host/sdhci-pci-arasan.c
new file mode 100644
index 0000000..499f320
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pci-arasan.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sdhci-pci-arasan.c - Driver for Arasan PCI Controller with
+ * integrated phy.
+ *
+ * Copyright (C) 2017 Arasan Chip Systems Inc.
+ *
+ * Author: Atul Garg <agarg@arasan.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "sdhci.h"
+#include "sdhci-pci.h"
+
+/* Extra registers for Arasan SD/SDIO/MMC Host Controller with PHY */
+#define PHY_ADDR_REG 0x300
+#define PHY_DAT_REG 0x304
+
+#define PHY_WRITE BIT(8)
+#define PHY_BUSY BIT(9)
+#define DATA_MASK 0xFF
+
+/* PHY Specific Registers */
+#define DLL_STATUS 0x00
+#define IPAD_CTRL1 0x01
+#define IPAD_CTRL2 0x02
+#define IPAD_STS 0x03
+#define IOREN_CTRL1 0x06
+#define IOREN_CTRL2 0x07
+#define IOPU_CTRL1 0x08
+#define IOPU_CTRL2 0x09
+#define ITAP_DELAY 0x0C
+#define OTAP_DELAY 0x0D
+#define STRB_SEL 0x0E
+#define CLKBUF_SEL 0x0F
+#define MODE_CTRL 0x11
+#define DLL_TRIM 0x12
+#define CMD_CTRL 0x20
+#define DATA_CTRL 0x21
+#define STRB_CTRL 0x22
+#define CLK_CTRL 0x23
+#define PHY_CTRL 0x24
+
+#define DLL_ENBL BIT(3)
+#define RTRIM_EN BIT(1)
+#define PDB_ENBL BIT(1)
+#define RETB_ENBL BIT(6)
+#define ODEN_CMD BIT(1)
+#define ODEN_DAT 0xFF
+#define REN_STRB BIT(0)
+#define REN_CMND BIT(1)
+#define REN_DATA 0xFF
+#define PU_CMD BIT(1)
+#define PU_DAT 0xFF
+#define ITAPDLY_EN BIT(0)
+#define OTAPDLY_EN BIT(0)
+#define OD_REL_CMD BIT(1)
+#define OD_REL_DAT 0xFF
+#define DLLTRM_ICP 0x8
+#define PDB_CMND BIT(0)
+#define PDB_DATA 0xFF
+#define PDB_STRB BIT(0)
+#define PDB_CLOCK BIT(0)
+#define CALDONE_MASK 0x10
+#define DLL_RDY_MASK 0x10
+#define MAX_CLK_BUF 0x7
+
+/* Mode Controls */
+#define ENHSTRB_MODE BIT(0)
+#define HS400_MODE BIT(1)
+#define LEGACY_MODE BIT(2)
+#define DDR50_MODE BIT(3)
+
+/*
+ * The controller has no dedicated mode bits for HS200/HS;
+ * BIT(4) and BIT(5) are used here for software bookkeeping.
+ */
+#define HS200_MODE BIT(4)
+#define HISPD_MODE BIT(5)
+
+#define OTAPDLY(x) (((x) << 1) | OTAPDLY_EN)
+#define ITAPDLY(x) (((x) << 1) | ITAPDLY_EN)
+#define FREQSEL(x) (((x) << 5) | DLL_ENBL)
+#define IOPAD(x, y) ((x) | ((y) << 2))
+
+/* Arasan private data */
+struct arasan_host {
+ u32 chg_clk;
+};
+
+static int arasan_phy_addr_poll(struct sdhci_host *host, u32 offset, u32 mask)
+{
+ ktime_t timeout = ktime_add_us(ktime_get(), 100);
+ bool failed;
+ u8 val = 0;
+
+ while (1) {
+ failed = ktime_after(ktime_get(), timeout);
+ val = sdhci_readw(host, PHY_ADDR_REG);
+ if (!(val & mask))
+ return 0;
+ if (failed)
+ return -EBUSY;
+ }
+}
+
+static int arasan_phy_write(struct sdhci_host *host, u8 data, u8 offset)
+{
+ sdhci_writew(host, data, PHY_DAT_REG);
+ sdhci_writew(host, (PHY_WRITE | offset), PHY_ADDR_REG);
+ return arasan_phy_addr_poll(host, PHY_ADDR_REG, PHY_BUSY);
+}
+
+static int arasan_phy_read(struct sdhci_host *host, u8 offset, u8 *data)
+{
+ int ret;
+
+ sdhci_writew(host, 0, PHY_DAT_REG);
+ sdhci_writew(host, offset, PHY_ADDR_REG);
+ ret = arasan_phy_addr_poll(host, PHY_ADDR_REG, PHY_BUSY);
+
+ /* Masking valid data bits */
+ *data = sdhci_readw(host, PHY_DAT_REG) & DATA_MASK;
+ return ret;
+}
+
+static int arasan_phy_sts_poll(struct sdhci_host *host, u32 offset, u32 mask)
+{
+ int ret;
+ ktime_t timeout = ktime_add_us(ktime_get(), 100);
+ bool failed;
+ u8 val = 0;
+
+ while (1) {
+ failed = ktime_after(ktime_get(), timeout);
+ ret = arasan_phy_read(host, offset, &val);
+ if (ret)
+ return -EBUSY;
+ else if (val & mask)
+ return 0;
+ if (failed)
+ return -EBUSY;
+ }
+}
+
+/* Initialize the Arasan PHY */
+static int arasan_phy_init(struct sdhci_host *host)
+{
+ int ret;
+ u8 val;
+
+ /* Program IOPADs and wait for calibration to be done */
+ if (arasan_phy_read(host, IPAD_CTRL1, &val) ||
+ arasan_phy_write(host, val | RETB_ENBL | PDB_ENBL, IPAD_CTRL1) ||
+ arasan_phy_read(host, IPAD_CTRL2, &val) ||
+ arasan_phy_write(host, val | RTRIM_EN, IPAD_CTRL2))
+ return -EBUSY;
+ ret = arasan_phy_sts_poll(host, IPAD_STS, CALDONE_MASK);
+ if (ret)
+ return -EBUSY;
+
+ /* Program CMD/Data lines */
+ if (arasan_phy_read(host, IOREN_CTRL1, &val) ||
+ arasan_phy_write(host, val | REN_CMND | REN_STRB, IOREN_CTRL1) ||
+ arasan_phy_read(host, IOPU_CTRL1, &val) ||
+ arasan_phy_write(host, val | PU_CMD, IOPU_CTRL1) ||
+ arasan_phy_read(host, CMD_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_CMND, CMD_CTRL) ||
+ arasan_phy_read(host, IOREN_CTRL2, &val) ||
+ arasan_phy_write(host, val | REN_DATA, IOREN_CTRL2) ||
+ arasan_phy_read(host, IOPU_CTRL2, &val) ||
+ arasan_phy_write(host, val | PU_DAT, IOPU_CTRL2) ||
+ arasan_phy_read(host, DATA_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_DATA, DATA_CTRL) ||
+ arasan_phy_read(host, STRB_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_STRB, STRB_CTRL) ||
+ arasan_phy_read(host, CLK_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_CLOCK, CLK_CTRL) ||
+ arasan_phy_read(host, CLKBUF_SEL, &val) ||
+ arasan_phy_write(host, val | MAX_CLK_BUF, CLKBUF_SEL) ||
+ arasan_phy_write(host, LEGACY_MODE, MODE_CTRL))
+ return -EBUSY;
+ return 0;
+}
+
+/* Set Arasan PHY for different modes */
+static int arasan_phy_set(struct sdhci_host *host, u8 mode, u8 otap,
+ u8 drv_type, u8 itap, u8 trim, u8 clk)
+{
+ u8 val;
+ int ret;
+
+ if (mode == HISPD_MODE || mode == HS200_MODE)
+ ret = arasan_phy_write(host, 0x0, MODE_CTRL);
+ else
+ ret = arasan_phy_write(host, mode, MODE_CTRL);
+ if (ret)
+ return ret;
+ if (mode == HS400_MODE || mode == HS200_MODE) {
+ ret = arasan_phy_read(host, IPAD_CTRL1, &val);
+ if (ret)
+ return ret;
+ ret = arasan_phy_write(host, IOPAD(val, drv_type), IPAD_CTRL1);
+ if (ret)
+ return ret;
+ }
+ if (mode == LEGACY_MODE) {
+ ret = arasan_phy_write(host, 0x0, OTAP_DELAY);
+ if (ret)
+ return ret;
+ ret = arasan_phy_write(host, 0x0, ITAP_DELAY);
+ } else {
+ ret = arasan_phy_write(host, OTAPDLY(otap), OTAP_DELAY);
+ if (ret)
+ return ret;
+ if (mode != HS200_MODE)
+ ret = arasan_phy_write(host, ITAPDLY(itap), ITAP_DELAY);
+ else
+ ret = arasan_phy_write(host, 0x0, ITAP_DELAY);
+ }
+ if (ret)
+ return ret;
+ if (mode != LEGACY_MODE) {
+ ret = arasan_phy_write(host, trim, DLL_TRIM);
+ if (ret)
+ return ret;
+ }
+ ret = arasan_phy_write(host, 0, DLL_STATUS);
+ if (ret)
+ return ret;
+ if (mode != LEGACY_MODE) {
+ ret = arasan_phy_write(host, FREQSEL(clk), DLL_STATUS);
+ if (ret)
+ return ret;
+ ret = arasan_phy_sts_poll(host, DLL_STATUS, DLL_RDY_MASK);
+ if (ret)
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int arasan_select_phy_clock(struct sdhci_host *host)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct arasan_host *arasan_host = sdhci_pci_priv(slot);
+ u8 clk;
+
+ if (arasan_host->chg_clk == host->mmc->ios.clock)
+ return 0;
+
+ arasan_host->chg_clk = host->mmc->ios.clock;
+ if (host->mmc->ios.clock == 200000000)
+ clk = 0x0;
+ else if (host->mmc->ios.clock == 100000000)
+ clk = 0x2;
+ else if (host->mmc->ios.clock == 50000000)
+ clk = 0x1;
+ else
+ clk = 0x0;
+
+ if (host->mmc_host_ops.hs400_enhanced_strobe) {
+ arasan_phy_set(host, ENHSTRB_MODE, 1, 0x0, 0x0,
+ DLLTRM_ICP, clk);
+ } else {
+ switch (host->mmc->ios.timing) {
+ case MMC_TIMING_LEGACY:
+ arasan_phy_set(host, LEGACY_MODE, 0x0, 0x0, 0x0,
+ 0x0, 0x0);
+ break;
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ arasan_phy_set(host, HISPD_MODE, 0x3, 0x0, 0x2,
+ DLLTRM_ICP, clk);
+ break;
+ case MMC_TIMING_MMC_HS200:
+ case MMC_TIMING_UHS_SDR104:
+ arasan_phy_set(host, HS200_MODE, 0x2,
+ host->mmc->ios.drv_type, 0x0,
+ DLLTRM_ICP, clk);
+ break;
+ case MMC_TIMING_MMC_DDR52:
+ case MMC_TIMING_UHS_DDR50:
+ arasan_phy_set(host, DDR50_MODE, 0x1, 0x0,
+ 0x0, DLLTRM_ICP, clk);
+ break;
+ case MMC_TIMING_MMC_HS400:
+ arasan_phy_set(host, HS400_MODE, 0x1,
+ host->mmc->ios.drv_type, 0xa,
+ DLLTRM_ICP, clk);
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int arasan_pci_probe_slot(struct sdhci_pci_slot *slot)
+{
+ int err;
+
+ slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE | MMC_CAP_8_BIT_DATA;
+ err = arasan_phy_init(slot->host);
+ if (err)
+ return -ENODEV;
+ return 0;
+}
+
+static void arasan_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ sdhci_set_clock(host, clock);
+
+ /* Change phy settings for the new clock */
+ arasan_select_phy_clock(host);
+}
+
+static const struct sdhci_ops arasan_sdhci_pci_ops = {
+ .set_clock = arasan_sdhci_set_clock,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+const struct sdhci_pci_fixes sdhci_arasan = {
+ .probe_slot = arasan_pci_probe_slot,
+ .ops = &arasan_sdhci_pci_ops,
+ .priv_size = sizeof(struct arasan_host),
+};
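
A note on the tap/DLL macros in this new file: each one simply packs a
field value next to its enable bit. Worked expansions, using DLL_ENBL ==
BIT(3) and the *_EN bits == BIT(0) as defined above:

	/* FREQSEL(0x2) == (0x2 << 5) | DLL_ENBL   == 0x40 | 0x08 == 0x48 */
	/* OTAPDLY(0x3) == (0x3 << 1) | OTAPDLY_EN == 0x06 | 0x01 == 0x07 */
	/* IOPAD(x, y)  ==  x | (y << 2), i.e. the drive type lands at bit 2 */
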
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 3e4f04f..6d1a983 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -30,17 +30,37 @@
#include <linux/mmc/sdhci-pci-data.h>
#include <linux/acpi.h>
+#include "cqhci.h"
+
#include "sdhci.h"
#include "sdhci-pci.h"
-static int sdhci_pci_enable_dma(struct sdhci_host *host);
static void sdhci_pci_hw_reset(struct sdhci_host *host);
#ifdef CONFIG_PM_SLEEP
-static int __sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
+static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
+{
+ mmc_pm_flag_t pm_flags = 0;
+ int i;
+
+ for (i = 0; i < chip->num_slots; i++) {
+ struct sdhci_pci_slot *slot = chip->slots[i];
+
+ if (slot)
+ pm_flags |= slot->host->mmc->pm_flags;
+ }
+
+ return device_set_wakeup_enable(&chip->pdev->dev,
+ (pm_flags & MMC_PM_KEEP_POWER) &&
+ (pm_flags & MMC_PM_WAKE_SDIO_IRQ));
+}
+
+static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
{
int i, ret;
+ sdhci_pci_init_wakeup(chip);
+
for (i = 0; i < chip->num_slots; i++) {
struct sdhci_pci_slot *slot = chip->slots[i];
struct sdhci_host *host;
@@ -56,9 +76,6 @@ static int __sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
ret = sdhci_suspend_host(host);
if (ret)
goto err_pci_suspend;
-
- if (host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ)
- sdhci_enable_irq_wakeups(host);
}
return 0;
@@ -69,52 +86,44 @@ err_pci_suspend:
return ret;
}
-static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
+int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
{
- mmc_pm_flag_t pm_flags = 0;
- int i;
+ struct sdhci_pci_slot *slot;
+ int i, ret;
for (i = 0; i < chip->num_slots; i++) {
- struct sdhci_pci_slot *slot = chip->slots[i];
+ slot = chip->slots[i];
+ if (!slot)
+ continue;
- if (slot)
- pm_flags |= slot->host->mmc->pm_flags;
+ ret = sdhci_resume_host(slot->host);
+ if (ret)
+ return ret;
}
- return device_init_wakeup(&chip->pdev->dev,
- (pm_flags & MMC_PM_KEEP_POWER) &&
- (pm_flags & MMC_PM_WAKE_SDIO_IRQ));
+ return 0;
}
-static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
+static int sdhci_cqhci_suspend(struct sdhci_pci_chip *chip)
{
int ret;
- ret = __sdhci_pci_suspend_host(chip);
+ ret = cqhci_suspend(chip->slots[0]->host->mmc);
if (ret)
return ret;
- sdhci_pci_init_wakeup(chip);
-
- return 0;
+ return sdhci_pci_suspend_host(chip);
}
-int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
+static int sdhci_cqhci_resume(struct sdhci_pci_chip *chip)
{
- struct sdhci_pci_slot *slot;
- int i, ret;
-
- for (i = 0; i < chip->num_slots; i++) {
- slot = chip->slots[i];
- if (!slot)
- continue;
+ int ret;
- ret = sdhci_resume_host(slot->host);
- if (ret)
- return ret;
- }
+ ret = sdhci_pci_resume_host(chip);
+ if (ret)
+ return ret;
- return 0;
+ return cqhci_resume(chip->slots[0]->host->mmc);
}
#endif
@@ -166,8 +175,48 @@ static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
return 0;
}
+
+static int sdhci_cqhci_runtime_suspend(struct sdhci_pci_chip *chip)
+{
+ int ret;
+
+ ret = cqhci_suspend(chip->slots[0]->host->mmc);
+ if (ret)
+ return ret;
+
+ return sdhci_pci_runtime_suspend_host(chip);
+}
+
+static int sdhci_cqhci_runtime_resume(struct sdhci_pci_chip *chip)
+{
+ int ret;
+
+ ret = sdhci_pci_runtime_resume_host(chip);
+ if (ret)
+ return ret;
+
+ return cqhci_resume(chip->slots[0]->host->mmc);
+}
#endif
+static u32 sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+static void sdhci_pci_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
/*****************************************************************************\
* *
* Hardware specific quirk handling *
@@ -583,6 +632,18 @@ static const struct sdhci_ops sdhci_intel_byt_ops = {
.voltage_switch = sdhci_intel_voltage_switch,
};
+static const struct sdhci_ops sdhci_intel_glk_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_power = sdhci_intel_set_power,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .hw_reset = sdhci_pci_hw_reset,
+ .voltage_switch = sdhci_intel_voltage_switch,
+ .irq = sdhci_cqhci_irq,
+};
+
static void byt_read_dsm(struct sdhci_pci_slot *slot)
{
struct intel_host *intel_host = sdhci_pci_priv(slot);
@@ -612,12 +673,80 @@ static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
int ret = byt_emmc_probe_slot(slot);
+ slot->host->mmc->caps2 |= MMC_CAP2_CQE;
+
if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES,
slot->host->mmc_host_ops.hs400_enhanced_strobe =
intel_hs400_enhanced_strobe;
+ slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
+ }
+
+ return ret;
+}
+
+static void glk_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 reg;
+
+ /*
+ * CQE gets stuck if it sees Buffer Read Enable bit set, which can be
+ * the case after tuning, so ensure the buffer is drained.
+ */
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ while (reg & SDHCI_DATA_AVAILABLE) {
+ sdhci_readl(host, SDHCI_BUFFER);
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
}
+ sdhci_cqe_enable(mmc);
+}
+
+static const struct cqhci_host_ops glk_cqhci_ops = {
+ .enable = glk_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_pci_dumpregs,
+};
+
+static int glk_emmc_add_host(struct sdhci_pci_slot *slot)
+{
+ struct device *dev = &slot->chip->pdev->dev;
+ struct sdhci_host *host = slot->host;
+ struct cqhci_host *cq_host;
+ bool dma64;
+ int ret;
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ cq_host->mmio = host->ioaddr + 0x200;
+ cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
+ cq_host->ops = &glk_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ if (dma64)
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret)
+ goto cleanup;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ sdhci_cleanup_host(host);
return ret;
}
@@ -699,11 +828,20 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
.allow_runtime_pm = true,
.probe_slot = glk_emmc_probe_slot,
+ .add_host = glk_emmc_add_host,
+#ifdef CONFIG_PM_SLEEP
+ .suspend = sdhci_cqhci_suspend,
+ .resume = sdhci_cqhci_resume,
+#endif
+#ifdef CONFIG_PM
+ .runtime_suspend = sdhci_cqhci_runtime_suspend,
+ .runtime_resume = sdhci_cqhci_runtime_resume,
+#endif
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
SDHCI_QUIRK2_STOP_WITH_TC,
- .ops = &sdhci_intel_byt_ops,
+ .ops = &sdhci_intel_glk_ops,
.priv_size = sizeof(struct intel_host),
};
@@ -778,6 +916,8 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
break;
case INTEL_MRFLD_SDIO:
+ /* Advertise 2.0v for compatibility with the SDIO card's OCR */
+ slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195;
slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
MMC_CAP_POWER_OFF_CARD;
break;
@@ -955,7 +1095,7 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip)
{
int i, ret;
- ret = __sdhci_pci_suspend_host(chip);
+ ret = sdhci_pci_suspend_host(chip);
if (ret)
return ret;
@@ -965,8 +1105,6 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip)
jmicron_enable_mmc(chip->slots[i]->host, 0);
}
- sdhci_pci_init_wakeup(chip);
-
return 0;
}
@@ -1306,6 +1444,7 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(O2, SDS1, o2),
SDHCI_PCI_DEVICE(O2, SEABIRD0, o2),
SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
+ SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan),
SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
/* Generic SD host controller */
{PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
@@ -1320,7 +1459,7 @@ MODULE_DEVICE_TABLE(pci, pci_ids);
* *
\*****************************************************************************/
-static int sdhci_pci_enable_dma(struct sdhci_host *host)
+int sdhci_pci_enable_dma(struct sdhci_host *host)
{
struct sdhci_pci_slot *slot;
struct pci_dev *pdev;
@@ -1543,10 +1682,13 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
}
}
- host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
+ host->mmc->pm_caps = MMC_PM_KEEP_POWER;
host->mmc->slotno = slotno;
host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
+ if (device_can_wakeup(&pdev->dev))
+ host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+
if (slot->cd_idx >= 0) {
ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx,
slot->cd_override_level, 0, NULL);
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 0056f08..5cbcdc4 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -55,6 +55,9 @@
#define PCI_SUBDEVICE_ID_NI_7884 0x7884
+#define PCI_VENDOR_ID_ARASAN 0x16e6
+#define PCI_DEVICE_ID_ARASAN_PHY_EMMC 0x0670
+
/*
* PCI device class and mask
*/
@@ -170,11 +173,13 @@ static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot)
#ifdef CONFIG_PM_SLEEP
int sdhci_pci_resume_host(struct sdhci_pci_chip *chip);
#endif
-
+int sdhci_pci_enable_dma(struct sdhci_host *host);
int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
#ifdef CONFIG_PM_SLEEP
int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
#endif
+extern const struct sdhci_pci_fixes sdhci_arasan;
+
#endif /* __SDHCI_PCI_H */
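
The new Arasan IDs pair with the SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan)
table entry in sdhci-pci-core.c. Assuming the usual expansion of that macro
(defined earlier in this header, outside the hunk), the entry is equivalent
to:

	{
		.vendor		= PCI_VENDOR_ID_ARASAN,		 /* 0x16e6 */
		.device		= PCI_DEVICE_ID_ARASAN_PHY_EMMC, /* 0x0670 */
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= (kernel_ulong_t)&sdhci_arasan,
	},
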
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 8c0f884..1451152 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -82,6 +82,10 @@ static int sdhci_probe(struct platform_device *pdev)
host->hw_name = "sdhci";
host->ops = &sdhci_pltfm_ops;
host->irq = platform_get_irq(pdev, 0);
+ if (host->irq <= 0) {
+ ret = -EINVAL;
+ goto err_host;
+ }
host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
sdhci = sdhci_priv(host);
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 0842bbc..4d0791f 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -230,7 +230,14 @@ static void xenon_set_power(struct sdhci_host *host, unsigned char mode,
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
}
+static void xenon_voltage_switch(struct sdhci_host *host)
+{
+	/* Wait 5 ms after setting the 1.8V signal enable bit */
+ usleep_range(5000, 5500);
+}
+
static const struct sdhci_ops sdhci_xenon_ops = {
+ .voltage_switch = xenon_voltage_switch,
.set_clock = sdhci_set_clock,
.set_power = xenon_set_power,
.set_bus_width = sdhci_set_bus_width,
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index e9290a3..070aff9 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1434,6 +1434,13 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
if (mode != MMC_POWER_OFF) {
switch (1 << vdd) {
case MMC_VDD_165_195:
+			/*
+			 * Without a regulator, SDHCI does not support 2.0v,
+			 * so we only get here if the driver deliberately
+			 * added the 2.0v range to ocr_avail. Map it to 1.8v
+			 * for the purpose of turning on the power.
+			 */
+ case MMC_VDD_20_21:
pwr = SDHCI_POWER_180;
break;
case MMC_VDD_29_30:
@@ -2821,25 +2828,33 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
* sdhci_disable_irq_wakeups() since it will be set by
* sdhci_enable_card_detection() or sdhci_init().
*/
-void sdhci_enable_irq_wakeups(struct sdhci_host *host)
+static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
+ u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
+ SDHCI_WAKE_ON_INT;
+ u32 irq_val = 0;
+ u8 wake_val = 0;
u8 val;
- u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
- | SDHCI_WAKE_ON_INT;
- u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
- SDHCI_INT_CARD_INT;
- val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
- val |= mask ;
- /* Avoid fake wake up */
- if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
- val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
- irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
+ if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)) {
+ wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
+ irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
}
+
+ wake_val |= SDHCI_WAKE_ON_INT;
+ irq_val |= SDHCI_INT_CARD_INT;
+
+ val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
+ val &= ~mask;
+ val |= wake_val;
sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+
sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
+
+ host->irq_wake_enabled = !enable_irq_wake(host->irq);
+
+ return host->irq_wake_enabled;
}
-EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
@@ -2850,6 +2865,10 @@ static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
val &= ~mask;
sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+
+ disable_irq_wake(host->irq);
+
+ host->irq_wake_enabled = false;
}
int sdhci_suspend_host(struct sdhci_host *host)
@@ -2858,15 +2877,14 @@ int sdhci_suspend_host(struct sdhci_host *host)
mmc_retune_timer_stop(host->mmc);
- if (!device_may_wakeup(mmc_dev(host->mmc))) {
+ if (!device_may_wakeup(mmc_dev(host->mmc)) ||
+ !sdhci_enable_irq_wakeups(host)) {
host->ier = 0;
sdhci_writel(host, 0, SDHCI_INT_ENABLE);
sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
free_irq(host->irq, host);
- } else {
- sdhci_enable_irq_wakeups(host);
- enable_irq_wake(host->irq);
}
+
return 0;
}
@@ -2894,15 +2912,14 @@ int sdhci_resume_host(struct sdhci_host *host)
mmiowb();
}
- if (!device_may_wakeup(mmc_dev(host->mmc))) {
+ if (host->irq_wake_enabled) {
+ sdhci_disable_irq_wakeups(host);
+ } else {
ret = request_threaded_irq(host->irq, sdhci_irq,
sdhci_thread_irq, IRQF_SHARED,
mmc_hostname(host->mmc), host);
if (ret)
return ret;
- } else {
- sdhci_disable_irq_wakeups(host);
- disable_irq_wake(host->irq);
}
sdhci_enable_card_detection(host);
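
The reworked suspend path keeps the IRQ armed only when device_may_wakeup()
holds and enable_irq_wake() actually succeeds; the new
host->irq_wake_enabled flag records that outcome so resume does not need to
re-evaluate device_may_wakeup(), which may have changed while suspended.
The resulting resume branch:

	if (host->irq_wake_enabled) {
		/* The IRQ stayed armed across suspend; just disarm wakeups. */
		sdhci_disable_irq_wakeups(host);
	} else {
		/* The IRQ was freed at suspend time; request it again. */
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
		if (ret)
			return ret;
	}
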
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 54bc444..afab26f 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -484,6 +484,7 @@ struct sdhci_host {
bool bus_on; /* Bus power prevents runtime suspend */
bool preset_enabled; /* Preset is enabled */
bool pending_reset; /* Cmd/data reset is pending */
+ bool irq_wake_enabled; /* IRQ wakeup is enabled */
struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */
struct mmc_command *cmd; /* Current command */
@@ -718,7 +719,6 @@ void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable);
#ifdef CONFIG_PM
int sdhci_suspend_host(struct sdhci_host *host);
int sdhci_resume_host(struct sdhci_host *host);
-void sdhci_enable_irq_wakeups(struct sdhci_host *host);
int sdhci_runtime_suspend_host(struct sdhci_host *host);
int sdhci_runtime_resume_host(struct sdhci_host *host);
#endif
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index 04ca0d3..485f759 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -10,9 +10,11 @@
* the Free Software Foundation, version 2 of the License.
*/
+#include <linux/acpi.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/property.h>
#include <linux/clk.h>
@@ -146,7 +148,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- sdhci_get_of_property(pdev);
host->hw_name = "f_sdh30";
host->ops = &sdhci_f_sdh30_ops;
host->irq = irq;
@@ -158,25 +159,29 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
goto err;
}
- priv->clk_iface = devm_clk_get(&pdev->dev, "iface");
- if (IS_ERR(priv->clk_iface)) {
- ret = PTR_ERR(priv->clk_iface);
- goto err;
- }
+ if (dev_of_node(dev)) {
+ sdhci_get_of_property(pdev);
- ret = clk_prepare_enable(priv->clk_iface);
- if (ret)
- goto err;
+ priv->clk_iface = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(priv->clk_iface)) {
+ ret = PTR_ERR(priv->clk_iface);
+ goto err;
+ }
- priv->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- goto err_clk;
- }
+ ret = clk_prepare_enable(priv->clk_iface);
+ if (ret)
+ goto err;
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- goto err_clk;
+ priv->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto err_clk;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto err_clk;
+ }
/* init vendor specific regs */
ctrl = sdhci_readw(host, F_SDH30_AHB_CONFIG);
@@ -226,16 +231,27 @@ static int sdhci_f_sdh30_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
static const struct of_device_id f_sdh30_dt_ids[] = {
{ .compatible = "fujitsu,mb86s70-sdhci-3.0" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, f_sdh30_dt_ids);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id f_sdh30_acpi_ids[] = {
+ { "SCX0002" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, f_sdh30_acpi_ids);
+#endif
static struct platform_driver sdhci_f_sdh30_driver = {
.driver = {
.name = "f_sdh30",
- .of_match_table = f_sdh30_dt_ids,
+ .of_match_table = of_match_ptr(f_sdh30_dt_ids),
+ .acpi_match_table = ACPI_PTR(f_sdh30_acpi_ids),
.pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_f_sdh30_probe,
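
The f_sdh30 changes make the driver probe from either DT or ACPI: clocks
are only looked up on OF systems, and the match tables are wrapped so that
of_match_ptr()/ACPI_PTR() compile to NULL when the respective support is
disabled:

	static struct platform_driver example_driver = {
		.driver = {
			.name = "f_sdh30",
			/* both become NULL if CONFIG_OF / CONFIG_ACPI are off */
			.of_match_table = of_match_ptr(f_sdh30_dt_ids),
			.acpi_match_table = ACPI_PTR(f_sdh30_acpi_ids),
		},
	};
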
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 53fb18b..7bb00c6 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -916,7 +916,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
struct mmc_command *cmd = mrq->cmd;
- u32 opc = cmd->opcode;
+ u32 opc;
u32 mask = 0;
unsigned long flags;
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index cc98355d..bad612d 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -3,7 +3,7 @@
* (C) Copyright 2007-2011 Reuuimlla Technology Co., Ltd.
* (C) Copyright 2007-2011 Aaron Maoye <leafy.myeh@reuuimllatech.com>
* (C) Copyright 2013-2014 O2S GmbH <www.o2s.ch>
- * (C) Copyright 2013-2014 David Lanzend�rfer <david.lanzendoerfer@o2s.ch>
+ * (C) Copyright 2013-2014 David Lanzendörfer <david.lanzendoerfer@o2s.ch>
* (C) Copyright 2013-2014 Hans de Goede <hdegoede@redhat.com>
* (C) Copyright 2017 Sootech SA
*
@@ -1255,6 +1255,11 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
goto error_assert_reset;
host->irq = platform_get_irq(pdev, 0);
+ if (host->irq <= 0) {
+ ret = -EINVAL;
+ goto error_assert_reset;
+ }
+
return devm_request_threaded_irq(&pdev->dev, host->irq, sunxi_mmc_irq,
sunxi_mmc_handle_manual_stop, 0, "sunxi-mmc", host);
@@ -1393,5 +1398,5 @@ module_platform_driver(sunxi_mmc_driver);
MODULE_DESCRIPTION("Allwinner's SD/MMC Card Controller Driver");
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("David Lanzend�rfer <david.lanzendoerfer@o2s.ch>");
+MODULE_AUTHOR("David Lanzendörfer <david.lanzendoerfer@o2s.ch>");
MODULE_ALIAS("platform:sunxi-mmc");
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 64b7e9f..43a2ea5 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -92,14 +92,19 @@ static int tmio_mmc_probe(struct platform_device *pdev)
pdata->flags |= TMIO_MMC_HAVE_HIGH_REG;
- host = tmio_mmc_host_alloc(pdev);
- if (!host)
+ host = tmio_mmc_host_alloc(pdev, pdata);
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
goto cell_disable;
+ }
/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
host->bus_shift = resource_size(res) >> 10;
- ret = tmio_mmc_host_probe(host, pdata, NULL);
+ host->mmc->f_max = pdata->hclk;
+ host->mmc->f_min = pdata->hclk / 512;
+
+ ret = tmio_mmc_host_probe(host);
if (ret)
goto host_free;
@@ -128,15 +133,11 @@ out:
static int tmio_mmc_remove(struct platform_device *pdev)
{
const struct mfd_cell *cell = mfd_get_cell(pdev);
- struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct tmio_mmc_host *host = platform_get_drvdata(pdev);
- if (mmc) {
- struct tmio_mmc_host *host = mmc_priv(mmc);
-
- tmio_mmc_host_remove(host);
- if (cell->disable)
- cell->disable(pdev);
- }
+ tmio_mmc_host_remove(host);
+ if (cell->disable)
+ cell->disable(pdev);
return 0;
}
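
tmio_mmc_host_alloc() now takes the platform data and reports failure via
ERR_PTR() rather than NULL, so callers throughout this series switch to the
IS_ERR()/PTR_ERR() idiom:

	host = tmio_mmc_host_alloc(pdev, pdata);
	if (IS_ERR(host)) {
		ret = PTR_ERR(host);
		goto cell_disable;	/* label from the caller above */
	}
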
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 3e6ff89..e7d6513 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -112,12 +112,6 @@
struct tmio_mmc_data;
struct tmio_mmc_host;
-struct tmio_mmc_dma {
- enum dma_slave_buswidth dma_buswidth;
- bool (*filter)(struct dma_chan *chan, void *arg);
- void (*enable)(struct tmio_mmc_host *host, bool enable);
-};
-
struct tmio_mmc_dma_ops {
void (*start)(struct tmio_mmc_host *host, struct mmc_data *data);
void (*enable)(struct tmio_mmc_host *host, bool enable);
@@ -134,6 +128,7 @@ struct tmio_mmc_host {
struct mmc_request *mrq;
struct mmc_data *data;
struct mmc_host *mmc;
+ struct mmc_host_ops ops;
/* Callbacks for clock / power control */
void (*set_pwr)(struct platform_device *host, int state);
@@ -144,18 +139,15 @@ struct tmio_mmc_host {
struct scatterlist *sg_orig;
unsigned int sg_len;
unsigned int sg_off;
- unsigned long bus_shift;
+ unsigned int bus_shift;
struct platform_device *pdev;
struct tmio_mmc_data *pdata;
- struct tmio_mmc_dma *dma;
/* DMA support */
bool force_pio;
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
- struct completion dma_dataend;
- struct tasklet_struct dma_complete;
struct tasklet_struct dma_issue;
struct scatterlist bounce_sg;
u8 *bounce_buf;
@@ -174,7 +166,6 @@ struct tmio_mmc_host {
struct mutex ios_lock; /* protect set_ios() context */
bool native_hotplug;
bool sdio_irq_enabled;
- u32 scc_tappos;
/* Mandatory callback */
int (*clk_enable)(struct tmio_mmc_host *host);
@@ -185,9 +176,6 @@ struct tmio_mmc_host {
void (*clk_disable)(struct tmio_mmc_host *host);
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
- int (*card_busy)(struct mmc_host *mmc);
- int (*start_signal_voltage_switch)(struct mmc_host *mmc,
- struct mmc_ios *ios);
int (*write16_hook)(struct tmio_mmc_host *host, int addr);
void (*hw_reset)(struct tmio_mmc_host *host);
void (*prepare_tuning)(struct tmio_mmc_host *host, unsigned long tap);
@@ -207,11 +195,10 @@ struct tmio_mmc_host {
const struct tmio_mmc_dma_ops *dma_ops;
};
-struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev);
+struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
+ struct tmio_mmc_data *pdata);
void tmio_mmc_host_free(struct tmio_mmc_host *host);
-int tmio_mmc_host_probe(struct tmio_mmc_host *host,
- struct tmio_mmc_data *pdata,
- const struct tmio_mmc_dma_ops *dma_ops);
+int tmio_mmc_host_probe(struct tmio_mmc_host *host);
void tmio_mmc_host_remove(struct tmio_mmc_host *host);
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
@@ -240,26 +227,26 @@ int tmio_mmc_host_runtime_resume(struct device *dev);
static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
- return readw(host->ctl + (addr << host->bus_shift));
+ return ioread16(host->ctl + (addr << host->bus_shift));
}
static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
- readsw(host->ctl + (addr << host->bus_shift), buf, count);
+ ioread16_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host,
int addr)
{
- return readw(host->ctl + (addr << host->bus_shift)) |
- readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
+ return ioread16(host->ctl + (addr << host->bus_shift)) |
+ ioread16(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}
static inline void sd_ctrl_read32_rep(struct tmio_mmc_host *host, int addr,
u32 *buf, int count)
{
- readsl(host->ctl + (addr << host->bus_shift), buf, count);
+ ioread32_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
@@ -270,26 +257,26 @@ static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
*/
if (host->write16_hook && host->write16_hook(host, addr))
return;
- writew(val, host->ctl + (addr << host->bus_shift));
+ iowrite16(val, host->ctl + (addr << host->bus_shift));
}
static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
- writesw(host->ctl + (addr << host->bus_shift), buf, count);
+ iowrite16_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host,
int addr, u32 val)
{
- writew(val & 0xffff, host->ctl + (addr << host->bus_shift));
- writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
+ iowrite16(val & 0xffff, host->ctl + (addr << host->bus_shift));
+ iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
const u32 *buf, int count)
{
- writesl(host->ctl + (addr << host->bus_shift), buf, count);
+ iowrite32_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
#endif
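
The accessor rework above swaps the raw readw()/writew() family for ioread*()/iowrite*(), which also works when the control registers were mapped through the iomap interface rather than plain ioremap(). A minimal sketch of the shifted-register pattern these helpers share (the host type here is hypothetical; bus_shift scales 16-bit register offsets up for wider buses):

#include <linux/io.h>

struct example_host {
	void __iomem *ctl;
	unsigned int bus_shift;	/* 0 for a 16-bit bus, 1 for 32-bit */
};

static inline u16 example_read16(struct example_host *host, int addr)
{
	/* register offsets are defined for a 16-bit bus and scaled up */
	return ioread16(host->ctl + (addr << host->bus_shift));
}

static inline void example_write16(struct example_host *host, int addr,
				   u16 val)
{
	iowrite16(val, host->ctl + (addr << host->bus_shift));
}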
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 583bf32..3349424 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -806,7 +806,7 @@ static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (ret == 0)
set_bit(i, host->taps);
- mdelay(1);
+ usleep_range(1000, 1200);
}
ret = host->select_tuning(host);
@@ -926,20 +926,6 @@ static void tmio_mmc_done_work(struct work_struct *work)
tmio_mmc_finish_request(host);
}
-static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
-{
- if (!host->clk_enable)
- return -ENOTSUPP;
-
- return host->clk_enable(host);
-}
-
-static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
-{
- if (host->clk_disable)
- host->clk_disable(host);
-}
-
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
struct mmc_host *mmc = host->mmc;
@@ -958,7 +944,7 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
* 100us were not enough. Is this the same 140us delay, as in
* tmio_mmc_set_ios()?
*/
- udelay(200);
+ usleep_range(200, 300);
}
/*
* It seems, VccQ should be switched on after Vcc, this is also what the
@@ -966,7 +952,7 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
*/
if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
ret = regulator_enable(mmc->supply.vqmmc);
- udelay(200);
+ usleep_range(200, 300);
}
if (ret < 0)
@@ -1059,7 +1045,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
/* Let things settle. delay taken from winCE driver */
- udelay(140);
+ usleep_range(140, 200);
if (PTR_ERR(host->mrq) == -EINTR)
dev_dbg(&host->pdev->dev,
"%s.%d: IOS interrupted: clk %u, mode %u",
@@ -1076,15 +1062,9 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct tmio_mmc_data *pdata = host->pdata;
- int ret = mmc_gpio_get_ro(mmc);
-
- if (ret >= 0)
- return ret;
-
- ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
- (sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
- return ret;
+ return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
+ (sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}
static int tmio_multi_io_quirk(struct mmc_card *card,
@@ -1098,7 +1078,7 @@ static int tmio_multi_io_quirk(struct mmc_card *card,
return blk_size;
}
-static struct mmc_host_ops tmio_mmc_ops = {
+static const struct mmc_host_ops tmio_mmc_ops = {
.request = tmio_mmc_request,
.set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro,
@@ -1145,19 +1125,45 @@ static void tmio_mmc_of_parse(struct platform_device *pdev,
pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
}
-struct tmio_mmc_host*
-tmio_mmc_host_alloc(struct platform_device *pdev)
+struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
+ struct tmio_mmc_data *pdata)
{
struct tmio_mmc_host *host;
struct mmc_host *mmc;
+ struct resource *res;
+ void __iomem *ctl;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctl = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctl))
+ return ERR_CAST(ctl);
mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
if (!mmc)
- return NULL;
+ return ERR_PTR(-ENOMEM);
host = mmc_priv(mmc);
+ host->ctl = ctl;
host->mmc = mmc;
host->pdev = pdev;
+ host->pdata = pdata;
+ host->ops = tmio_mmc_ops;
+ mmc->ops = &host->ops;
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret) {
+ host = ERR_PTR(ret);
+ goto free;
+ }
+
+ tmio_mmc_of_parse(pdev, pdata);
+
+ platform_set_drvdata(pdev, host);
+
+ return host;
+free:
+ mmc_free_host(mmc);
return host;
}
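
Copying the shared template into host->ops is what allows tmio_mmc_ops to become const: instead of patching the global table (see the removed tmio_mmc_ops.card_busy assignments further down), each host now patches its private copy. A sketch of the pattern, using only symbols that appear elsewhere in this patch:

static void example_setup_ops(struct tmio_mmc_host *host)
{
	host->ops = tmio_mmc_ops;	/* struct copy of the const template */
	if (mmc_can_gpio_ro(host->mmc))	/* per-host override, as in probe */
		host->ops.get_ro = mmc_gpio_get_ro;
	host->mmc->ops = &host->ops;
}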
@@ -1169,32 +1175,24 @@ void tmio_mmc_host_free(struct tmio_mmc_host *host)
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_free);
-int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
- struct tmio_mmc_data *pdata,
- const struct tmio_mmc_dma_ops *dma_ops)
+int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
{
struct platform_device *pdev = _host->pdev;
+ struct tmio_mmc_data *pdata = _host->pdata;
struct mmc_host *mmc = _host->mmc;
- struct resource *res_ctl;
int ret;
u32 irq_mask = TMIO_MASK_CMD;
- tmio_mmc_of_parse(pdev, pdata);
+ /*
+ * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
+ * looping forever...
+ */
+ if (mmc->f_min == 0)
+ return -EINVAL;
if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
_host->write16_hook = NULL;
- res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_ctl)
- return -EINVAL;
-
- ret = mmc_of_parse(mmc);
- if (ret < 0)
- return ret;
-
- _host->pdata = pdata;
- platform_set_drvdata(pdev, mmc);
-
_host->set_pwr = pdata->set_pwr;
_host->set_clk_div = pdata->set_clk_div;
@@ -1202,15 +1200,11 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
if (ret < 0)
return ret;
- _host->ctl = devm_ioremap(&pdev->dev,
- res_ctl->start, resource_size(res_ctl));
- if (!_host->ctl)
- return -ENOMEM;
-
- tmio_mmc_ops.card_busy = _host->card_busy;
- tmio_mmc_ops.start_signal_voltage_switch =
- _host->start_signal_voltage_switch;
- mmc->ops = &tmio_mmc_ops;
+ if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
+ ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
+ if (ret)
+ return ret;
+ }
mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps2 |= pdata->capabilities2;
@@ -1233,7 +1227,10 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
}
mmc->max_seg_size = mmc->max_req_size;
- _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
+ if (mmc_can_gpio_ro(mmc))
+ _host->ops.get_ro = mmc_gpio_get_ro;
+
+ _host->native_hotplug = !(mmc_can_gpio_cd(mmc) ||
mmc->caps & MMC_CAP_NEEDS_POLL ||
!mmc_card_is_removable(mmc));
@@ -1246,18 +1243,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
if (pdata->flags & TMIO_MMC_MIN_RCAR2)
_host->native_hotplug = true;
- if (tmio_mmc_clk_enable(_host) < 0) {
- mmc->f_max = pdata->hclk;
- mmc->f_min = mmc->f_max / 512;
- }
-
- /*
- * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
- * looping forever...
- */
- if (mmc->f_min == 0)
- return -EINVAL;
-
/*
* While using internal tmio hardware logic for card detection, we need
* to ensure it stays powered for it to work.
@@ -1293,7 +1278,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
INIT_WORK(&_host->done, tmio_mmc_done_work);
/* See if we also get DMA */
- _host->dma_ops = dma_ops;
tmio_mmc_request_dma(_host, pdata);
pm_runtime_set_active(&pdev->dev);
@@ -1307,14 +1291,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
- if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
- ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
- if (ret)
- goto remove_host;
-
- mmc_gpiod_request_cd_irq(mmc);
- }
-
return 0;
remove_host:
@@ -1343,16 +1319,27 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- tmio_mmc_clk_disable(host);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);
#ifdef CONFIG_PM
+static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
+{
+ if (!host->clk_enable)
+ return -ENOTSUPP;
+
+ return host->clk_enable(host);
+}
+
+static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
+{
+ if (host->clk_disable)
+ host->clk_disable(host);
+}
+
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
- struct mmc_host *mmc = dev_get_drvdata(dev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_host *host = dev_get_drvdata(dev);
tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
@@ -1372,8 +1359,7 @@ static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
int tmio_mmc_host_runtime_resume(struct device *dev)
{
- struct mmc_host *mmc = dev_get_drvdata(dev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_host *host = dev_get_drvdata(dev);
tmio_mmc_reset(host);
tmio_mmc_clk_enable(host);
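
Since tmio_mmc_host_alloc() now stores the host itself with platform_set_drvdata(), the runtime PM callbacks fetch it straight from the device, and the clock helpers could move inside the CONFIG_PM block where their only remaining callers live. A sketch of the resulting shape, body elided:

#ifdef CONFIG_PM
static int example_runtime_resume(struct device *dev)
{
	struct tmio_mmc_host *host = dev_get_drvdata(dev); /* no mmc_priv() hop */

	/* ... reset controller, re-enable clock, unmask IRQs ... */
	return 0;
}
#endif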
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 0806f72..a85af23 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -904,9 +904,6 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
if (ooblen % DOC_LAYOUT_OOB_SIZE)
return -EINVAL;
- if (from + len > mtd->size)
- return -EINVAL;
-
ops->oobretlen = 0;
ops->retlen = 0;
ret = 0;
@@ -990,36 +987,6 @@ err_in_read:
goto out;
}
-/**
- * doc_read - Read bytes from flash
- * @mtd: the device
- * @from: the offset from first block and first page, in bytes, aligned on page
- * size
- * @len: the number of bytes to read (must be a multiple of 4)
- * @retlen: the number of bytes actually read
- * @buf: the filled in buffer
- *
- * Reads flash memory pages. This function does not read the OOB chunk, but only
- * the page data.
- *
- * Returns 0 if read successful, of -EIO, -EINVAL if an error occurred
- */
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct mtd_oob_ops ops;
- size_t ret;
-
- memset(&ops, 0, sizeof(ops));
- ops.datbuf = buf;
- ops.len = len;
- ops.mode = MTD_OPS_AUTO_OOB;
-
- ret = doc_read_oob(mtd, from, &ops);
- *retlen = ops.retlen;
- return ret;
-}
-
static int doc_reload_bbt(struct docg3 *docg3)
{
int block = DOC_LAYOUT_BLOCK_BBT;
@@ -1471,8 +1438,6 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
if (len && ooblen &&
(len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
return -EINVAL;
- if (ofs + len > mtd->size)
- return -EINVAL;
ops->oobretlen = 0;
ops->retlen = 0;
@@ -1513,39 +1478,6 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
return ret;
}
-/**
- * doc_write - Write a buffer to the chip
- * @mtd: the device
- * @to: the offset from first block and first page, in bytes, aligned on page
- * size
- * @len: the number of bytes to write (must be a full page size, ie. 512)
- * @retlen: the number of bytes actually written (0 or 512)
- * @buf: the buffer to get bytes from
- *
- * Writes data to the chip.
- *
- * Returns 0 if write successful, -EIO if write error
- */
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct docg3 *docg3 = mtd->priv;
- int ret;
- struct mtd_oob_ops ops;
-
- doc_dbg("doc_write(to=%lld, len=%zu)\n", to, len);
- ops.datbuf = (char *)buf;
- ops.len = len;
- ops.mode = MTD_OPS_PLACE_OOB;
- ops.oobbuf = NULL;
- ops.ooblen = 0;
- ops.ooboffs = 0;
-
- ret = doc_write_oob(mtd, to, &ops);
- *retlen = ops.retlen;
- return ret;
-}
-
static struct docg3 *sysfs_dev2docg3(struct device *dev,
struct device_attribute *attr)
{
@@ -1866,8 +1798,6 @@ static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
mtd->writebufsize = mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
mtd->_erase = doc_erase;
- mtd->_read = doc_read;
- mtd->_write = doc_write;
mtd->_read_oob = doc_read_oob;
mtd->_write_oob = doc_write_oob;
mtd->_block_isbad = doc_block_isbad;
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index dbe6a1d..a4e18f6 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -307,10 +307,18 @@ static int m25p_remove(struct spi_device *spi)
{
struct m25p *flash = spi_get_drvdata(spi);
+ spi_nor_restore(&flash->spi_nor);
+
/* Clean up MTD stuff. */
return mtd_device_unregister(&flash->spi_nor.mtd);
}
+static void m25p_shutdown(struct spi_device *spi)
+{
+ struct m25p *flash = spi_get_drvdata(spi);
+
+ spi_nor_restore(&flash->spi_nor);
+}
/*
* Do NOT add to this array without reading the following:
*
@@ -386,6 +394,7 @@ static struct spi_driver m25p80_driver = {
.id_table = m25p_ids,
.probe = m25p_probe,
.remove = m25p_remove,
+ .shutdown = m25p_shutdown,
/* REVISIT: many of these chips have deep power-down modes, which
* should clearly be entered on suspend() to minimize power use.
diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c
index 8956b7d..75f71d1 100644
--- a/drivers/mtd/devices/mchp23k256.c
+++ b/drivers/mtd/devices/mchp23k256.c
@@ -68,6 +68,7 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
struct spi_transfer transfer[2] = {};
struct spi_message message;
unsigned char command[MAX_CMD_SIZE];
+ int ret;
spi_message_init(&message);
@@ -84,12 +85,16 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
mutex_lock(&flash->lock);
- spi_sync(flash->spi, &message);
+ ret = spi_sync(flash->spi, &message);
+
+ mutex_unlock(&flash->lock);
+
+ if (ret)
+ return ret;
if (retlen && message.actual_length > sizeof(command))
*retlen += message.actual_length - sizeof(command);
- mutex_unlock(&flash->lock);
return 0;
}
@@ -100,6 +105,7 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len,
struct spi_transfer transfer[2] = {};
struct spi_message message;
unsigned char command[MAX_CMD_SIZE];
+ int ret;
spi_message_init(&message);
@@ -117,12 +123,16 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len,
mutex_lock(&flash->lock);
- spi_sync(flash->spi, &message);
+ ret = spi_sync(flash->spi, &message);
+
+ mutex_unlock(&flash->lock);
+
+ if (ret)
+ return ret;
if (retlen && message.actual_length > sizeof(command))
*retlen += message.actual_length - sizeof(command);
- mutex_unlock(&flash->lock);
return 0;
}
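
Both transfer paths now release the mutex before acting on the spi_sync() result, so every return leaves the lock dropped. The pattern in isolation (the flash type here is hypothetical):

struct example_flash {
	struct spi_device *spi;
	struct mutex lock;
};

static int example_xfer(struct example_flash *flash, struct spi_message *msg)
{
	int ret;

	mutex_lock(&flash->lock);
	ret = spi_sync(flash->spi, msg);	/* only the bus access is locked */
	mutex_unlock(&flash->lock);

	return ret;	/* 0 on success, negative errno from spi_sync() */
}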
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index f80e911..28553c8 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -503,6 +503,11 @@ int add_mtd_device(struct mtd_info *mtd)
return -EEXIST;
BUG_ON(mtd->writesize == 0);
+
+ if (WARN_ON((!mtd->erasesize || !mtd->_erase) &&
+ !(mtd->flags & MTD_NO_ERASE)))
+ return -EINVAL;
+
mutex_lock(&mtd_table_mutex);
i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
@@ -1053,7 +1058,20 @@ int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
* representing the maximum number of bitflips that were corrected on
* any one ecc region (if applicable; zero otherwise).
*/
- ret_code = mtd->_read(mtd, from, len, retlen, buf);
+ if (mtd->_read) {
+ ret_code = mtd->_read(mtd, from, len, retlen, buf);
+ } else if (mtd->_read_oob) {
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = buf,
+ };
+
+ ret_code = mtd->_read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+ } else {
+ return -ENOTSUPP;
+ }
+
if (unlikely(ret_code < 0))
return ret_code;
if (mtd->ecc_strength == 0)
@@ -1068,11 +1086,25 @@ int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
*retlen = 0;
if (to < 0 || to >= mtd->size || len > mtd->size - to)
return -EINVAL;
- if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
+ if ((!mtd->_write && !mtd->_write_oob) ||
+ !(mtd->flags & MTD_WRITEABLE))
return -EROFS;
if (!len)
return 0;
ledtrig_mtd_activity();
+
+ if (!mtd->_write) {
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = (u8 *)buf,
+ };
+ int ret;
+
+ ret = mtd->_write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+ return ret;
+ }
+
return mtd->_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write);
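
With these fallbacks a driver may provide only _read_oob/_write_oob, and mtdcore synthesizes plain data access through a stack-allocated mtd_oob_ops — which is what lets docg3 drop its doc_read()/doc_write() wrappers earlier in this patch. From the caller's side nothing changes; a sketch (buffer size illustrative):

static int example_read_first_page(struct mtd_info *mtd)
{
	size_t retlen;
	u8 buf[512];
	int err;

	err = mtd_read(mtd, 0, sizeof(buf), &retlen, buf);
	if (err < 0 && !mtd_is_bitflip(err))
		return err;	/* hard failure */
	return 0;		/* success, possibly with corrected bitflips */
}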
@@ -1114,7 +1146,7 @@ static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
if (!ops->oobbuf)
ops->ooblen = 0;
- if (offs < 0 || offs + ops->len >= mtd->size)
+ if (offs < 0 || offs + ops->len > mtd->size)
return -EINVAL;
if (ops->ooblen) {
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index be088bc..76cd21d 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -105,34 +105,17 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct mtd_part *part = mtd_to_part(mtd);
+ struct mtd_ecc_stats stats;
int res;
- if (from >= mtd->size)
- return -EINVAL;
- if (ops->datbuf && from + ops->len > mtd->size)
- return -EINVAL;
-
- /*
- * If OOB is also requested, make sure that we do not read past the end
- * of this partition.
- */
- if (ops->oobbuf) {
- size_t len, pages;
-
- len = mtd_oobavail(mtd, ops);
- pages = mtd_div_by_ws(mtd->size, mtd);
- pages -= mtd_div_by_ws(from, mtd);
- if (ops->ooboffs + ops->ooblen > pages * len)
- return -EINVAL;
- }
-
+ stats = part->parent->ecc_stats;
res = part->parent->_read_oob(part->parent, from + part->offset, ops);
- if (unlikely(res)) {
- if (mtd_is_bitflip(res))
- mtd->ecc_stats.corrected++;
- if (mtd_is_eccerr(res))
- mtd->ecc_stats.failed++;
- }
+ if (unlikely(mtd_is_eccerr(res)))
+ mtd->ecc_stats.failed +=
+ part->parent->ecc_stats.failed - stats.failed;
+ else
+ mtd->ecc_stats.corrected +=
+ part->parent->ecc_stats.corrected - stats.corrected;
return res;
}
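
The partition code now derives its ECC statistics from the parent's counters: snapshot before the call, fold in the delta afterwards. Unlike the removed per-call increments, this stays accurate when one request spans several ECC steps. The pattern in isolation (function names hypothetical):

static int example_counted_read(struct mtd_info *part, struct mtd_info *parent,
				loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_ecc_stats before = parent->ecc_stats;
	int res = parent->_read_oob(parent, from, ops);

	if (mtd_is_eccerr(res))
		part->ecc_stats.failed +=
			parent->ecc_stats.failed - before.failed;
	else
		part->ecc_stats.corrected +=
			parent->ecc_stats.corrected - before.corrected;
	return res;
}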
@@ -189,10 +172,6 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
{
struct mtd_part *part = mtd_to_part(mtd);
- if (to >= mtd->size)
- return -EINVAL;
- if (ops->datbuf && to + ops->len > mtd->size)
- return -EINVAL;
return part->parent->_write_oob(part->parent, to + part->offset, ops);
}
@@ -435,8 +414,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
parent->dev.parent;
slave->mtd.dev.of_node = part->of_node;
- slave->mtd._read = part_read;
- slave->mtd._write = part_write;
+ if (parent->_read)
+ slave->mtd._read = part_read;
+ if (parent->_write)
+ slave->mtd._write = part_write;
if (parent->_panic_write)
slave->mtd._panic_write = part_panic_write;
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index f07492c..7eb0e1f 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1223,8 +1223,9 @@ static int mtdswap_show(struct seq_file *s, void *data)
unsigned int max[MTDSWAP_TREE_CNT];
unsigned int i, cw = 0, cwp = 0, cwecount = 0, bb_cnt, mapped, pages;
uint64_t use_size;
- char *name[] = {"clean", "used", "low", "high", "dirty", "bitflip",
- "failing"};
+ static const char * const name[] = {
+ "clean", "used", "low", "high", "dirty", "bitflip", "failing"
+ };
mutex_lock(&d->mbd_dev->lock);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index bb48aaf..e6b8c59 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -315,6 +315,7 @@ config MTD_NAND_ATMEL
config MTD_NAND_PXA3xx
tristate "NAND support on PXA3xx and Armada 370/XP"
+ depends on !MTD_NAND_MARVELL
depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU
help
@@ -323,6 +324,18 @@ config MTD_NAND_PXA3xx
platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada
platforms (7K, 8K) (NFCv2).
+config MTD_NAND_MARVELL
+ tristate "NAND controller support on Marvell boards"
+ depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \
+ COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables the NAND flash controller driver for Marvell boards,
+ including:
+ - PXA3xx processors (NFCv1)
+ - 32-bit Armada platforms (XP, 37x, 38x, 39x) (NFCv2)
+	  - 64-bit Armada platforms (7K, 8K) (NFCv2)
+
config MTD_NAND_SLC_LPC32XX
tristate "NXP LPC32xx SLC Controller"
depends on ARCH_LPC32XX
@@ -376,9 +389,7 @@ config MTD_NAND_GPMI_NAND
Enables NAND Flash support for IMX23, IMX28 or IMX6.
The GPMI controller is very powerful, with the help of BCH
module, it can do the hardware ECC. The GPMI supports several
- NAND flashs at the same time. The GPMI may conflicts with other
- block, such as SD card. So pay attention to it when you enable
- the GPMI.
+	  NAND flashes at the same time.
config MTD_NAND_BRCMNAND
tristate "Broadcom STB NAND controller"
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 118a134..921634b 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o
obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD) += omap_elm.o
obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
+obj-$(CONFIG_MTD_NAND_MARVELL) += marvell_nand.o
obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
index 90a71a5..b2f00b3 100644
--- a/drivers/mtd/nand/atmel/nand-controller.c
+++ b/drivers/mtd/nand/atmel/nand-controller.c
@@ -841,6 +841,8 @@ static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
struct atmel_nand *nand = to_atmel_nand(chip);
int ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
if (ret)
return ret;
@@ -857,7 +859,7 @@ static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
atmel_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
@@ -881,6 +883,8 @@ static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
if (ret)
return ret;
@@ -1000,7 +1004,7 @@ static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
* to the non-optimized one.
*/
if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB) {
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
raw);
@@ -1178,7 +1182,6 @@ static int atmel_hsmc_nand_ecc_init(struct atmel_nand *nand)
chip->ecc.write_page = atmel_hsmc_nand_pmecc_write_page;
chip->ecc.read_page_raw = atmel_hsmc_nand_pmecc_read_page_raw;
chip->ecc.write_page_raw = atmel_hsmc_nand_pmecc_write_page_raw;
- chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
return 0;
}
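
This is the recurring conversion throughout the NAND driver changes below: open-coded cmdfunc()/write_buf()/waitfunc() sequences become the generic nand_*_op() helpers, which issue the command, move the data and translate a failed status into -EIO in one call. A before/after sketch for a full page program:

static int example_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			      const u8 *buf, int page)
{
	/* old, open-coded equivalent:
	 *   chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
	 *   chip->write_buf(mtd, buf, mtd->writesize);
	 *   chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
	 *   status = chip->waitfunc(mtd, chip);
	 *   return status & NAND_STATUS_FAIL ? -EIO : 0;
	 */
	return nand_prog_page_op(chip, page, 0, buf, mtd->writesize);
}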
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 5655dca..87bbd17 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -572,6 +572,8 @@ static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -582,10 +584,10 @@ static int bf5xx_nand_write_page_raw(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf, int oob_required,
int page)
{
- bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
/*
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index e0eb51d..c28fd2b 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -1071,7 +1071,7 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
return;
brcmnand_set_wp(ctrl, wp);
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ nand_status_op(chip, NULL);
/* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
ret = bcmnand_ctrl_poll_status(ctrl,
NAND_CTRL_RDY |
@@ -1453,7 +1453,7 @@ static uint8_t brcmnand_read_byte(struct mtd_info *mtd)
/* At FC_BYTES boundary, switch to next column */
if (host->last_byte > 0 && offs == 0)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, addr, -1);
+ nand_change_read_column_op(chip, addr, NULL, 0, false);
ret = ctrl->flash_cache[offs];
break;
@@ -1681,7 +1681,7 @@ static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
int ret;
if (!buf) {
- buf = chip->buffers->databuf;
+ buf = chip->data_buf;
/* Invalidate page cache */
chip->pagebuf = -1;
}
@@ -1689,7 +1689,6 @@ static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
sas = mtd->oobsize / chip->ecc.steps;
/* read without ecc for verification */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page);
if (ret)
return ret;
@@ -1763,7 +1762,7 @@ try_dmaread:
err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
addr);
/* erased page bitflips corrected */
- if (err > 0)
+ if (err >= 0)
return err;
}
@@ -1793,6 +1792,8 @@ static int brcmnand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
struct brcmnand_host *host = nand_get_controller_data(chip);
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
return brcmnand_read(mtd, chip, host->last_addr,
mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
}
@@ -1804,6 +1805,8 @@ static int brcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
brcmnand_set_ecc_enabled(host, 0);
ret = brcmnand_read(mtd, chip, host->last_addr,
mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
@@ -1909,8 +1912,10 @@ static int brcmnand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int brcmnand_write_page_raw(struct mtd_info *mtd,
@@ -1920,10 +1925,12 @@ static int brcmnand_write_page_raw(struct mtd_info *mtd,
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
brcmnand_set_ecc_enabled(host, 0);
brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
brcmnand_set_ecc_enabled(host, 1);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int brcmnand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
@@ -2193,16 +2200,9 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
if (ctrl->nand_version >= 0x0702)
tmp |= ACC_CONTROL_RD_ERASED;
tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
- if (ctrl->features & BRCMNAND_HAS_PREFETCH) {
- /*
- * FIXME: Flash DMA + prefetch may see spurious erased-page ECC
- * errors
- */
- if (has_flash_dma(ctrl))
- tmp &= ~ACC_CONTROL_PREFETCH;
- else
- tmp |= ACC_CONTROL_PREFETCH;
- }
+ if (ctrl->features & BRCMNAND_HAS_PREFETCH)
+ tmp &= ~ACC_CONTROL_PREFETCH;
+
nand_writereg(ctrl, offs, tmp);
return 0;
@@ -2230,6 +2230,9 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
nand_set_controller_data(chip, host);
mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
host->cs);
+ if (!mtd->name)
+ return -ENOMEM;
+
mtd->owner = THIS_MODULE;
mtd->dev.parent = &pdev->dev;
@@ -2369,12 +2372,11 @@ static int brcmnand_resume(struct device *dev)
list_for_each_entry(host, &ctrl->host_list, node) {
struct nand_chip *chip = &host->chip;
- struct mtd_info *mtd = nand_to_mtd(chip);
brcmnand_save_restore_cs_config(host, 1);
/* Reset the chip, required by some chips after power-up */
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset_op(chip);
}
return 0;
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index bc558c4..567ff97 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -353,23 +353,15 @@ static void cafe_nand_bug(struct mtd_info *mtd)
static int cafe_nand_write_oob(struct mtd_info *mtd,
struct nand_chip *chip, int page)
{
- int status = 0;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
}
/* Don't use -- use nand_read_oob_std for now */
static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/**
* cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
@@ -391,7 +383,7 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
cafe_readl(cafe, NAND_ECC_RESULT),
cafe_readl(cafe, NAND_ECC_SYN01));
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) {
@@ -549,13 +541,13 @@ static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
{
struct cafe_priv *cafe = nand_get_controller_data(chip);
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
/* Set up ECC autogeneration */
cafe->ctl2 |= (1<<30);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
@@ -613,7 +605,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
uint32_t ctrl;
int err = 0;
int old_dma;
- struct nand_buffers *nbuf;
/* Very old versions shared the same PCI ident for all three
functions on the chip. Verify the class too... */
@@ -661,7 +652,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
/* Enable the following for a flash based bad block table */
cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
- cafe->nand.options = NAND_OWN_BUFFERS;
if (skipbbt) {
cafe->nand.options |= NAND_SKIP_BBTSCAN;
@@ -731,32 +721,20 @@ static int cafe_nand_probe(struct pci_dev *pdev,
if (err)
goto out_irq;
- cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev,
- 2112 + sizeof(struct nand_buffers) +
- mtd->writesize + mtd->oobsize,
- &cafe->dmaaddr, GFP_KERNEL);
+ cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, 2112,
+ &cafe->dmaaddr, GFP_KERNEL);
if (!cafe->dmabuf) {
err = -ENOMEM;
goto out_irq;
}
- cafe->nand.buffers = nbuf = (void *)cafe->dmabuf + 2112;
/* Set up DMA address */
- cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);
- if (sizeof(cafe->dmaaddr) > 4)
- /* Shift in two parts to shut the compiler up */
- cafe_writel(cafe, (cafe->dmaaddr >> 16) >> 16, NAND_DMA_ADDR1);
- else
- cafe_writel(cafe, 0, NAND_DMA_ADDR1);
+ cafe_writel(cafe, lower_32_bits(cafe->dmaaddr), NAND_DMA_ADDR0);
+ cafe_writel(cafe, upper_32_bits(cafe->dmaaddr), NAND_DMA_ADDR1);
cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
- /* this driver does not need the @ecccalc and @ecccode */
- nbuf->ecccalc = NULL;
- nbuf->ecccode = NULL;
- nbuf->databuf = (uint8_t *)(nbuf + 1);
-
/* Restore the DMA flag */
usedma = old_dma;
@@ -801,10 +779,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
goto out;
out_free_dma:
- dma_free_coherent(&cafe->pdev->dev,
- 2112 + sizeof(struct nand_buffers) +
- mtd->writesize + mtd->oobsize,
- cafe->dmabuf, cafe->dmaaddr);
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
out_irq:
/* Disable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
@@ -829,10 +804,7 @@ static void cafe_nand_remove(struct pci_dev *pdev)
nand_release(mtd);
free_rs(cafe->rs);
pci_iounmap(pdev, cafe->mmio);
- dma_free_coherent(&cafe->pdev->dev,
- 2112 + sizeof(struct nand_buffers) +
- mtd->writesize + mtd->oobsize,
- cafe->dmabuf, cafe->dmaaddr);
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
kfree(cafe);
}
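
lower_32_bits()/upper_32_bits() replace the hand-rolled double shift and behave correctly whether dma_addr_t is 32 or 64 bits wide. The idiom in isolation (register offsets hypothetical):

#include <linux/io.h>
#include <linux/kernel.h>	/* lower_32_bits(), upper_32_bits() */
#include <linux/types.h>	/* dma_addr_t */

static void example_set_dma_addr(void __iomem *base, dma_addr_t addr)
{
	writel(lower_32_bits(addr), base + 0x00);	/* ADDR0: low half */
	writel(upper_32_bits(addr), base + 0x04);	/* ADDR1: high half */
}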
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 5124f8a..313c7f5 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -330,16 +330,12 @@ static int denali_check_erased_page(struct mtd_info *mtd,
unsigned long uncor_ecc_flags,
unsigned int max_bitflips)
{
- uint8_t *ecc_code = chip->buffers->ecccode;
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
int ecc_steps = chip->ecc.steps;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
- int i, ret, stat;
-
- ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
- chip->ecc.total);
- if (ret)
- return ret;
+ int i, stat;
for (i = 0; i < ecc_steps; i++) {
if (!(uncor_ecc_flags & BIT(i)))
@@ -645,8 +641,6 @@ static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
int page, int write)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0;
- unsigned int rnd_cmd = write ? NAND_CMD_RNDIN : NAND_CMD_RNDOUT;
int writesize = mtd->writesize;
int oobsize = mtd->oobsize;
uint8_t *bufpoi = chip->oob_poi;
@@ -658,11 +652,11 @@ static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
int i, pos, len;
/* BBM at the beginning of the OOB area */
- chip->cmdfunc(mtd, start_cmd, writesize, page);
if (write)
- chip->write_buf(mtd, bufpoi, oob_skip);
+ nand_prog_page_begin_op(chip, page, writesize, bufpoi,
+ oob_skip);
else
- chip->read_buf(mtd, bufpoi, oob_skip);
+ nand_read_page_op(chip, page, writesize, bufpoi, oob_skip);
bufpoi += oob_skip;
/* OOB ECC */
@@ -675,30 +669,35 @@ static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- chip->cmdfunc(mtd, rnd_cmd, pos, -1);
if (write)
- chip->write_buf(mtd, bufpoi, len);
+ nand_change_write_column_op(chip, pos, bufpoi, len,
+ false);
else
- chip->read_buf(mtd, bufpoi, len);
+ nand_change_read_column_op(chip, pos, bufpoi, len,
+ false);
bufpoi += len;
if (len < ecc_bytes) {
len = ecc_bytes - len;
- chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1);
if (write)
- chip->write_buf(mtd, bufpoi, len);
+ nand_change_write_column_op(chip, writesize +
+ oob_skip, bufpoi,
+ len, false);
else
- chip->read_buf(mtd, bufpoi, len);
+ nand_change_read_column_op(chip, writesize +
+ oob_skip, bufpoi,
+ len, false);
bufpoi += len;
}
}
/* OOB free */
len = oobsize - (bufpoi - chip->oob_poi);
- chip->cmdfunc(mtd, rnd_cmd, size - len, -1);
if (write)
- chip->write_buf(mtd, bufpoi, len);
+ nand_change_write_column_op(chip, size - len, bufpoi, len,
+ false);
else
- chip->read_buf(mtd, bufpoi, len);
+ nand_change_read_column_op(chip, size - len, bufpoi, len,
+ false);
}
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
@@ -710,12 +709,12 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
int ecc_steps = chip->ecc.steps;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
- void *dma_buf = denali->buf;
+ void *tmp_buf = denali->buf;
int oob_skip = denali->oob_skip_bytes;
size_t size = writesize + oobsize;
int ret, i, pos, len;
- ret = denali_data_xfer(denali, dma_buf, size, page, 1, 0);
+ ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
if (ret)
return ret;
@@ -730,11 +729,11 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(buf, dma_buf + pos, len);
+ memcpy(buf, tmp_buf + pos, len);
buf += len;
if (len < ecc_size) {
len = ecc_size - len;
- memcpy(buf, dma_buf + writesize + oob_skip,
+ memcpy(buf, tmp_buf + writesize + oob_skip,
len);
buf += len;
}
@@ -745,7 +744,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *oob = chip->oob_poi;
/* BBM at the beginning of the OOB area */
- memcpy(oob, dma_buf + writesize, oob_skip);
+ memcpy(oob, tmp_buf + writesize, oob_skip);
oob += oob_skip;
/* OOB ECC */
@@ -758,11 +757,11 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(oob, dma_buf + pos, len);
+ memcpy(oob, tmp_buf + pos, len);
oob += len;
if (len < ecc_bytes) {
len = ecc_bytes - len;
- memcpy(oob, dma_buf + writesize + oob_skip,
+ memcpy(oob, tmp_buf + writesize + oob_skip,
len);
oob += len;
}
@@ -770,7 +769,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
/* OOB free */
len = oobsize - (oob - chip->oob_poi);
- memcpy(oob, dma_buf + size - len, len);
+ memcpy(oob, tmp_buf + size - len, len);
}
return 0;
@@ -788,16 +787,12 @@ static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- int status;
denali_reset_irq(denali);
denali_oob_xfer(mtd, chip, page, 1);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -841,7 +836,7 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
int ecc_steps = chip->ecc.steps;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
- void *dma_buf = denali->buf;
+ void *tmp_buf = denali->buf;
int oob_skip = denali->oob_skip_bytes;
size_t size = writesize + oobsize;
int i, pos, len;
@@ -851,7 +846,7 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
* This simplifies the logic.
*/
if (!buf || !oob_required)
- memset(dma_buf, 0xff, size);
+ memset(tmp_buf, 0xff, size);
/* Arrange the buffer for syndrome payload/ecc layout */
if (buf) {
@@ -864,11 +859,11 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(dma_buf + pos, buf, len);
+ memcpy(tmp_buf + pos, buf, len);
buf += len;
if (len < ecc_size) {
len = ecc_size - len;
- memcpy(dma_buf + writesize + oob_skip, buf,
+ memcpy(tmp_buf + writesize + oob_skip, buf,
len);
buf += len;
}
@@ -879,7 +874,7 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *oob = chip->oob_poi;
/* BBM at the beginning of the OOB area */
- memcpy(dma_buf + writesize, oob, oob_skip);
+ memcpy(tmp_buf + writesize, oob, oob_skip);
oob += oob_skip;
/* OOB ECC */
@@ -892,11 +887,11 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(dma_buf + pos, oob, len);
+ memcpy(tmp_buf + pos, oob, len);
oob += len;
if (len < ecc_bytes) {
len = ecc_bytes - len;
- memcpy(dma_buf + writesize + oob_skip, oob,
+ memcpy(tmp_buf + writesize + oob_skip, oob,
len);
oob += len;
}
@@ -904,10 +899,10 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
/* OOB free */
len = oobsize - (oob - chip->oob_poi);
- memcpy(dma_buf + size - len, oob, len);
+ memcpy(tmp_buf + size - len, oob, len);
}
- return denali_data_xfer(denali, dma_buf, size, page, 1, 1);
+ return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
}
static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -951,7 +946,7 @@ static int denali_erase(struct mtd_info *mtd, int page)
irq_status = denali_wait_for_irq(denali,
INTR__ERASE_COMP | INTR__ERASE_FAIL);
- return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL;
+ return irq_status & INTR__ERASE_COMP ? 0 : -EIO;
}
static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
@@ -1359,7 +1354,6 @@ int denali_init(struct denali_nand_info *denali)
chip->read_buf = denali_read_buf;
chip->write_buf = denali_write_buf;
}
- chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
chip->ecc.read_page = denali_read_page;
chip->ecc.read_page_raw = denali_read_page_raw;
chip->ecc.write_page = denali_write_page;
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index 2911066..9ad33d2 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -329,7 +329,7 @@ struct denali_nand_info {
#define DENALI_CAP_DMA_64BIT BIT(1)
int denali_calc_ecc_bytes(int step_size, int strength);
-extern int denali_init(struct denali_nand_info *denali);
-extern void denali_remove(struct denali_nand_info *denali);
+int denali_init(struct denali_nand_info *denali);
+void denali_remove(struct denali_nand_info *denali);
#endif /* __DENALI_H__ */
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index 57fb7ae..49cb3e1 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -125,3 +125,7 @@ static struct pci_driver denali_pci_driver = {
.remove = denali_pci_remove,
};
module_pci_driver(denali_pci_driver);
+
+MODULE_DESCRIPTION("PCI driver for Denali NAND controller");
+MODULE_AUTHOR("Intel Corporation and its suppliers");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 72671dc..6bc93ea 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -448,7 +448,7 @@ static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this)
int status;
DoC_WaitReady(doc);
- this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ nand_status_op(this, NULL);
DoC_WaitReady(doc);
status = (int)this->read_byte(mtd);
@@ -595,7 +595,7 @@ static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
/* Assert ChipEnable and deassert WriteProtect */
WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
- this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset_op(this);
doc->curchip = chip;
doc->curfloor = floor;
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 2436cbc..72f1327 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -785,6 +785,8 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
+ nand_read_page_op(nand, page, 0, NULL, 0);
+
writew(DOC_ECCCONF0_READ_MODE |
DOC_ECCCONF0_ECC_ENABLE |
DOC_ECCCONF0_UNKNOWN |
@@ -864,7 +866,7 @@ static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
dev_dbg(doc->dev, "%s: page %x\n", __func__, page);
- docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page);
+ nand_read_page_op(nand, page, nand->ecc.size, NULL, 0);
writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
write_nop(docptr);
@@ -900,6 +902,7 @@ static int docg4_erase_block(struct mtd_info *mtd, int page)
struct docg4_priv *doc = nand_get_controller_data(nand);
void __iomem *docptr = doc->virtadr;
uint16_t g4_page;
+ int status;
dev_dbg(doc->dev, "%s: page %04x\n", __func__, page);
@@ -939,11 +942,15 @@ static int docg4_erase_block(struct mtd_info *mtd, int page)
poll_status(doc);
write_nop(docptr);
- return nand->waitfunc(mtd, nand);
+ status = nand->waitfunc(mtd, nand);
+ if (status < 0)
+ return status;
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
}
static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
- const uint8_t *buf, bool use_ecc)
+ const uint8_t *buf, int page, bool use_ecc)
{
struct docg4_priv *doc = nand_get_controller_data(nand);
void __iomem *docptr = doc->virtadr;
@@ -951,6 +958,8 @@ static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
dev_dbg(doc->dev, "%s...\n", __func__);
+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
+
writew(DOC_ECCCONF0_ECC_ENABLE |
DOC_ECCCONF0_UNKNOWN |
DOCG4_BCH_SIZE,
@@ -995,19 +1004,19 @@ static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
writew(0, docptr + DOC_DATAEND);
write_nop(docptr);
- return 0;
+ return nand_prog_page_end_op(nand);
}
static int docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
const uint8_t *buf, int oob_required, int page)
{
- return write_page(mtd, nand, buf, false);
+ return write_page(mtd, nand, buf, page, false);
}
static int docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
const uint8_t *buf, int oob_required, int page)
{
- return write_page(mtd, nand, buf, true);
+ return write_page(mtd, nand, buf, page, true);
}
static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 17db2f9..8b6dcd7 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -713,7 +713,7 @@ static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
- fsl_elbc_read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -729,10 +729,10 @@ static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
- fsl_elbc_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
/* ECC will be calculated automatically, and errors will be detected in
@@ -742,10 +742,10 @@ static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip,
uint32_t offset, uint32_t data_len,
const uint8_t *buf, int oob_required, int page)
{
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
fsl_elbc_write_buf(mtd, buf, mtd->writesize);
fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 9e03bac..4872a7b 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -688,7 +688,7 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
- fsl_ifc_read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -711,10 +711,10 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
- fsl_ifc_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
@@ -916,6 +916,13 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
if (ctrl->version >= FSL_IFC_VERSION_1_1_0)
fsl_ifc_sram_init(priv);
+ /*
+	 * IFC version 2.0.0 has 16KB of internal SRAM, whereas older
+	 * versions had 8KB. Hence the bufnum mask needs to be updated.
+ */
+ if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
+ priv->bufnum_mask = (priv->bufnum_mask * 2) + 1;
+
return 0;
}
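
bufnum_mask is always of the form 2^n - 1, so doubling it and adding one simply extends the mask by a bit, doubling the number of SRAM buffers addressed — matching the 8KB to 16KB growth in IFC 2.0.0. For instance:

/* Sketch: extending a 2^n - 1 mask by one bit.
 * 0x7 (8 buffers) -> 0xf (16 buffers); (m * 2) + 1 == (m << 1) | 1
 */
static unsigned int example_double_bufnum_mask(unsigned int mask)
{
	return (mask << 1) | 1;
}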
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index eac15d9..f49ed46 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -684,8 +684,8 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
int off, len, group = 0;
/*
* ecc_oob is intentionally taken as uint16_t. In 16bit devices, we
@@ -697,7 +697,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
unsigned int max_bitflips = 0;
for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
- chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
+ nand_read_page_op(chip, page, s * eccsize, NULL, 0);
chip->ecc.hwctl(mtd, NAND_ECC_READ);
chip->read_buf(mtd, p, eccsize);
@@ -720,8 +720,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
if (chip->options & NAND_BUSWIDTH_16)
len = roundup(len, 2);
- chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
- chip->read_buf(mtd, oob + j, len);
+ nand_read_oob_op(chip, page, off, oob + j, len);
j += len;
}
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 484f7fb..a8bde66 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -253,9 +253,9 @@ static int gpio_nand_probe(struct platform_device *pdev)
goto out_ce;
}
- gpiomtd->nwp = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
- if (IS_ERR(gpiomtd->nwp)) {
- ret = PTR_ERR(gpiomtd->nwp);
+ gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->ale)) {
+ ret = PTR_ERR(gpiomtd->ale);
goto out_ce;
}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 50f8d4a..61fdd73 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1029,11 +1029,13 @@ static void block_mark_swapping(struct gpmi_nand_data *this,
p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
}
-static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int gpmi_ecc_read_page_data(struct nand_chip *chip,
+ uint8_t *buf, int oob_required,
+ int page)
{
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *nfc_geo = &this->bch_geometry;
+ struct mtd_info *mtd = nand_to_mtd(chip);
void *payload_virt;
dma_addr_t payload_phys;
void *auxiliary_virt;
@@ -1067,9 +1069,6 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
return ret;
}
- /* handle the block mark swapping */
- block_mark_swapping(this, payload_virt, auxiliary_virt);
-
/* Loop over status bytes, accumulating ECC status. */
status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
@@ -1097,8 +1096,8 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
offset /= 8;
eccbytes -= offset;
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
- chip->read_buf(mtd, eccbuf, eccbytes);
+ nand_change_read_column_op(chip, offset, eccbuf,
+ eccbytes, false);
/*
* ECC data are not byte aligned and we may have
@@ -1158,6 +1157,9 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
max_bitflips = max_t(unsigned int, max_bitflips, *status);
}
+ /* handle the block mark swapping */
+ block_mark_swapping(this, buf, auxiliary_virt);
+
if (oob_required) {
/*
* It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
@@ -1176,6 +1178,14 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
return max_bitflips;
}
+static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ return gpmi_ecc_read_page_data(chip, buf, oob_required, page);
+}
+
/* Fake a virtual small page for the subpage read */
static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
uint32_t offs, uint32_t len, uint8_t *buf, int page)
@@ -1220,12 +1230,12 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
meta = geo->metadata_size;
if (first) {
col = meta + (size + ecc_parity_size) * first;
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, col, -1);
-
meta = 0;
buf = buf + first * size;
}
+ nand_read_page_op(chip, page, col, NULL, 0);
+
/* Save the old environment */
r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0);
r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1);
@@ -1254,7 +1264,7 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
/* Read the subpage now */
this->swap_block_mark = false;
- max_bitflips = gpmi_ecc_read_page(mtd, chip, buf, 0, page);
+ max_bitflips = gpmi_ecc_read_page_data(chip, buf, 0, page);
/* Restore */
writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
@@ -1277,6 +1287,9 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
int ret;
dev_dbg(this->dev, "ecc write page.\n");
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
if (this->swap_block_mark) {
/*
* If control arrives here, we're doing block mark swapping.
@@ -1338,7 +1351,10 @@ exit_auxiliary:
payload_virt, payload_phys);
}
- return 0;
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
}
/*
@@ -1411,7 +1427,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
memset(chip->oob_poi, ~0, mtd->oobsize);
/* Read out the conventional OOB. */
- chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
/*
@@ -1421,7 +1437,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
*/
if (GPMI_IS_MX23(this)) {
/* Read the block mark into the first byte of the OOB buffer. */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
chip->oob_poi[0] = chip->read_byte(mtd);
}
@@ -1432,7 +1448,6 @@ static int
gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
struct mtd_oob_region of = { };
- int status = 0;
/* Do we have available oob area? */
mtd_ooblayout_free(mtd, 0, &of);
@@ -1442,12 +1457,8 @@ gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
if (!nand_is_slc(chip))
return -EPERM;
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of.offset, page);
- chip->write_buf(mtd, chip->oob_poi + of.offset, of.length);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
+ chip->oob_poi + of.offset, of.length);
}
/*
@@ -1477,8 +1488,8 @@ static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
uint8_t *oob = chip->oob_poi;
int step;
- chip->read_buf(mtd, tmp_buf,
- mtd->writesize + mtd->oobsize);
+ nand_read_page_op(chip, page, 0, tmp_buf,
+ mtd->writesize + mtd->oobsize);
/*
* If required, swap the bad block marker and the data stored in the
@@ -1487,12 +1498,8 @@ static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
* See the layout description for a detailed explanation on why this
* is needed.
*/
- if (this->swap_block_mark) {
- u8 swap = tmp_buf[0];
-
- tmp_buf[0] = tmp_buf[mtd->writesize];
- tmp_buf[mtd->writesize] = swap;
- }
+ if (this->swap_block_mark)
+ swap(tmp_buf[0], tmp_buf[mtd->writesize]);
/*
* Copy the metadata section into the oob buffer (this section is
@@ -1615,31 +1622,22 @@ static int gpmi_ecc_write_page_raw(struct mtd_info *mtd,
* See the layout description for a detailed explanation on why this
* is needed.
*/
- if (this->swap_block_mark) {
- u8 swap = tmp_buf[0];
-
- tmp_buf[0] = tmp_buf[mtd->writesize];
- tmp_buf[mtd->writesize] = swap;
- }
-
- chip->write_buf(mtd, tmp_buf, mtd->writesize + mtd->oobsize);
+ if (this->swap_block_mark)
+ swap(tmp_buf[0], tmp_buf[mtd->writesize]);
- return 0;
+ return nand_prog_page_op(chip, page, 0, tmp_buf,
+ mtd->writesize + mtd->oobsize);
}
static int gpmi_ecc_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
return gpmi_ecc_read_page_raw(mtd, chip, NULL, 1, page);
}
static int gpmi_ecc_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
-
return gpmi_ecc_write_page_raw(mtd, chip, NULL, 1, page);
}
@@ -1649,7 +1647,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct gpmi_nand_data *this = nand_get_controller_data(chip);
int ret = 0;
uint8_t *block_mark;
- int column, page, status, chipnr;
+ int column, page, chipnr;
chipnr = (int)(ofs >> chip->chip_shift);
chip->select_chip(mtd, chipnr);
@@ -1663,13 +1661,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
/* Shift to get page */
page = (int)(ofs >> chip->page_shift);
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
- chip->write_buf(mtd, block_mark, 1);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- ret = -EIO;
+ ret = nand_prog_page_op(chip, page, column, block_mark, 1);
chip->select_chip(mtd, -1);
@@ -1712,7 +1704,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
unsigned int search_area_size_in_strides;
unsigned int stride;
unsigned int page;
- uint8_t *buffer = chip->buffers->databuf;
+ uint8_t *buffer = chip->data_buf;
int saved_chip_number;
int found_an_ncb_fingerprint = false;
@@ -1737,7 +1729,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
* Read the NCB fingerprint. The fingerprint is four bytes long
* and starts in the 12th byte of the page.
*/
- chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page);
+ nand_read_page_op(chip, page, 12, NULL, 0);
chip->read_buf(mtd, buffer, strlen(fingerprint));
/* Look for the fingerprint. */
@@ -1771,7 +1763,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
unsigned int block;
unsigned int stride;
unsigned int page;
- uint8_t *buffer = chip->buffers->databuf;
+ uint8_t *buffer = chip->data_buf;
int saved_chip_number;
int status;
@@ -1797,17 +1789,10 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
dev_dbg(dev, "Erasing the search area...\n");
for (block = 0; block < search_area_size_in_blocks; block++) {
- /* Compute the page address. */
- page = block * block_size_in_pages;
-
/* Erase this block. */
dev_dbg(dev, "\tErasing block 0x%x\n", block);
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
- chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
-
- /* Wait for the erase to finish. */
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
+ status = nand_erase_op(chip, block);
+ if (status)
dev_err(dev, "[%s] Erase failed.\n", __func__);
}
@@ -1823,13 +1808,9 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
/* Write the first page of the current stride. */
dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- /* Wait for the write to finish. */
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
+ status = chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
+ if (status)
dev_err(dev, "[%s] Write failed.\n", __func__);
}
@@ -1884,7 +1865,7 @@ static int mx23_boot_init(struct gpmi_nand_data *this)
/* Send the command to read the conventional block mark. */
chip->select_chip(mtd, chipnr);
- chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
block_mark = chip->read_byte(mtd);
chip->select_chip(mtd, -1);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index a45e4ce..06c1f99 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -268,31 +268,31 @@ struct timing_threshold {
};
/* Common Services */
-extern int common_nfc_set_geometry(struct gpmi_nand_data *);
-extern struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
-extern void prepare_data_dma(struct gpmi_nand_data *,
- enum dma_data_direction dr);
-extern int start_dma_without_bch_irq(struct gpmi_nand_data *,
- struct dma_async_tx_descriptor *);
-extern int start_dma_with_bch_irq(struct gpmi_nand_data *,
- struct dma_async_tx_descriptor *);
+int common_nfc_set_geometry(struct gpmi_nand_data *);
+struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
+void prepare_data_dma(struct gpmi_nand_data *,
+ enum dma_data_direction dr);
+int start_dma_without_bch_irq(struct gpmi_nand_data *,
+ struct dma_async_tx_descriptor *);
+int start_dma_with_bch_irq(struct gpmi_nand_data *,
+ struct dma_async_tx_descriptor *);
/* GPMI-NAND helper function library */
-extern int gpmi_init(struct gpmi_nand_data *);
-extern int gpmi_extra_init(struct gpmi_nand_data *);
-extern void gpmi_clear_bch(struct gpmi_nand_data *);
-extern void gpmi_dump_info(struct gpmi_nand_data *);
-extern int bch_set_geometry(struct gpmi_nand_data *);
-extern int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
-extern int gpmi_send_command(struct gpmi_nand_data *);
-extern void gpmi_begin(struct gpmi_nand_data *);
-extern void gpmi_end(struct gpmi_nand_data *);
-extern int gpmi_read_data(struct gpmi_nand_data *);
-extern int gpmi_send_data(struct gpmi_nand_data *);
-extern int gpmi_send_page(struct gpmi_nand_data *,
- dma_addr_t payload, dma_addr_t auxiliary);
-extern int gpmi_read_page(struct gpmi_nand_data *,
- dma_addr_t payload, dma_addr_t auxiliary);
+int gpmi_init(struct gpmi_nand_data *);
+int gpmi_extra_init(struct gpmi_nand_data *);
+void gpmi_clear_bch(struct gpmi_nand_data *);
+void gpmi_dump_info(struct gpmi_nand_data *);
+int bch_set_geometry(struct gpmi_nand_data *);
+int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
+int gpmi_send_command(struct gpmi_nand_data *);
+void gpmi_begin(struct gpmi_nand_data *);
+void gpmi_end(struct gpmi_nand_data *);
+int gpmi_read_data(struct gpmi_nand_data *);
+int gpmi_send_data(struct gpmi_nand_data *);
+int gpmi_send_page(struct gpmi_nand_data *,
+ dma_addr_t payload, dma_addr_t auxiliary);
+int gpmi_read_page(struct gpmi_nand_data *,
+ dma_addr_t payload, dma_addr_t auxiliary);
void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
const u8 *src, size_t src_bit_off,
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index 0897261..cb86279 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -544,7 +544,7 @@ static int hisi_nand_read_page_hwecc(struct mtd_info *mtd,
int max_bitflips = 0, stat = 0, stat_max = 0, status_ecc;
int stat_1, stat_2;
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
/* errors which can not be corrected by ECC */
@@ -574,8 +574,7 @@ static int hisi_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
{
struct hinfc_host *host = nand_get_controller_data(chip);
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
if (host->irq_status & HINFC504_INTS_UE) {
host->irq_status = 0;
@@ -590,11 +589,11 @@ static int hisi_nand_write_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf, int oob_required,
int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
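
When a driver must interleave its own transfers between the SEQIN and
PAGEPROG phases, as above, the split helpers are used instead of the
all-in-one nand_prog_page_op(). In legacy terms, and only as a hedged
sketch, nand_prog_page_end_op() corresponds to:

static int nand_prog_page_end_op_sketch(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int status;

	/* Close the program operation and check the chip status */
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}

nand_prog_page_begin_op() covers the matching SEQIN/address/data phase, so
the driver keeps full control over what is pushed to the chip in between.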
static void hisi_nfc_host_init(struct hinfc_host *host)
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index ad827d4..613b00a 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -313,6 +313,7 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
uint32_t ctrl;
struct nand_chip *chip = &nand->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 id[2];
/* Request I/O resource. */
sprintf(res_name, "bank%d", bank);
@@ -335,17 +336,16 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
/* Retrieve the IDs from the first chip. */
chip->select_chip(mtd, 0);
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
- *nand_maf_id = chip->read_byte(mtd);
- *nand_dev_id = chip->read_byte(mtd);
+ nand_reset_op(chip);
+ nand_readid_op(chip, 0, id, sizeof(id));
+ *nand_maf_id = id[0];
+ *nand_dev_id = id[1];
} else {
/* Detect additional chip. */
chip->select_chip(mtd, chipnr);
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
- if (*nand_maf_id != chip->read_byte(mtd)
- || *nand_dev_id != chip->read_byte(mtd)) {
+ nand_reset_op(chip);
+ nand_readid_op(chip, 0, id, sizeof(id));
+ if (*nand_maf_id != id[0] || *nand_dev_id != id[1]) {
ret = -ENODEV;
goto notfound_id;
}
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 5796468..e357948 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -461,7 +461,7 @@ static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
}
/* Writing Command and Address */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
/* For all sub-pages */
for (i = 0; i < host->mlcsubpages; i++) {
@@ -522,6 +522,8 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
memcpy(dma_buf, buf, mtd->writesize);
}
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
for (i = 0; i < host->mlcsubpages; i++) {
/* Start Encode */
writeb(0x00, MLC_ECC_ENC_REG(host->io_base));
@@ -550,7 +552,8 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
/* Wait for Controller Ready */
lpc32xx_waitfunc_controller(mtd, chip);
}
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index b61f28a..5f7cc6d 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -399,10 +399,7 @@ static void lpc32xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int
static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
struct nand_chip *chip, int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/*
@@ -411,17 +408,8 @@ static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
static int lpc32xx_nand_write_oob_syndrome(struct mtd_info *mtd,
struct nand_chip *chip, int page)
{
- int status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- /* Send command to program the OOB data */
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
}
/*
@@ -632,7 +620,7 @@ static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
/* Issue read command */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
/* Read data and oob, calculate ECC */
status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
@@ -675,7 +663,7 @@ static int lpc32xx_nand_read_page_raw_syndrome(struct mtd_info *mtd,
int page)
{
/* Issue read command */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
/* Raw reads can just use the FIFO interface */
chip->read_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
@@ -698,6 +686,8 @@ static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
uint8_t *pb;
int error;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
/* Write data, calculate ECC on outbound data */
error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
if (error)
@@ -716,7 +706,8 @@ static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
/* Write ECC data to device */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
/*
@@ -729,9 +720,11 @@ static int lpc32xx_nand_write_page_raw_syndrome(struct mtd_info *mtd,
int oob_required, int page)
{
/* Raw writes can just use the FIFO interface */
- chip->write_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
+ nand_prog_page_begin_op(chip, page, 0, buf,
+ chip->ecc.size * chip->ecc.steps);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
diff --git a/drivers/mtd/nand/marvell_nand.c b/drivers/mtd/nand/marvell_nand.c
new file mode 100644
index 0000000..2196f2a
--- /dev/null
+++ b/drivers/mtd/nand/marvell_nand.c
@@ -0,0 +1,2896 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Marvell NAND flash controller driver
+ *
+ * Copyright (C) 2017 Marvell
+ * Author: Miquel RAYNAL <miquel.raynal@free-electrons.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of_platform.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <asm/unaligned.h>
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/pxa-dma.h>
+#include <linux/platform_data/mtd-nand-pxa3xx.h>
+
+/* Data FIFO granularity, FIFO reads/writes must be a multiple of this length */
+#define FIFO_DEPTH 8
+#define FIFO_REP(x) (x / sizeof(u32))
+#define BCH_SEQ_READS (32 / FIFO_DEPTH)
+/* NFC does not support transfers of larger chunks at a time */
+#define MAX_CHUNK_SIZE 2112
+/* NFCv1 cannot read more than 7 bytes of ID */
+#define NFCV1_READID_LEN 7
+/* Polling is done at a pace of POLL_PERIOD us until POLL_TIMEOUT is reached */
+#define POLL_PERIOD 0
+#define POLL_TIMEOUT 100000
+/* Interrupt maximum wait period in ms */
+#define IRQ_TIMEOUT 1000
+/* Latency in clock cycles between SoC pins and NFC logic */
+#define MIN_RD_DEL_CNT 3
+/* Maximum number of contiguous address cycles */
+#define MAX_ADDRESS_CYC_NFCV1 5
+#define MAX_ADDRESS_CYC_NFCV2 7
+/* System control registers/bits to enable the NAND controller on some SoCs */
+#define GENCONF_SOC_DEVICE_MUX 0x208
+#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
+#define GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST BIT(20)
+#define GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST BIT(21)
+#define GENCONF_SOC_DEVICE_MUX_NFC_INT_EN BIT(25)
+#define GENCONF_CLK_GATING_CTRL 0x220
+#define GENCONF_CLK_GATING_CTRL_ND_GATE BIT(2)
+#define GENCONF_ND_CLK_CTRL 0x700
+#define GENCONF_ND_CLK_CTRL_EN BIT(0)
+
+/* NAND controller data flash control register */
+#define NDCR 0x00
+#define NDCR_ALL_INT GENMASK(11, 0)
+#define NDCR_CS1_CMDDM BIT(7)
+#define NDCR_CS0_CMDDM BIT(8)
+#define NDCR_RDYM BIT(11)
+#define NDCR_ND_ARB_EN BIT(12)
+#define NDCR_RA_START BIT(15)
+#define NDCR_RD_ID_CNT(x) (min_t(unsigned int, x, 0x7) << 16)
+#define NDCR_PAGE_SZ(x) (x >= 2048 ? BIT(24) : 0)
+#define NDCR_DWIDTH_M BIT(26)
+#define NDCR_DWIDTH_C BIT(27)
+#define NDCR_ND_RUN BIT(28)
+#define NDCR_DMA_EN BIT(29)
+#define NDCR_ECC_EN BIT(30)
+#define NDCR_SPARE_EN BIT(31)
+#define NDCR_GENERIC_FIELDS_MASK (~(NDCR_RA_START | NDCR_PAGE_SZ(2048) | \
+ NDCR_DWIDTH_M | NDCR_DWIDTH_C))
+
+/* NAND interface timing parameter 0 register */
+#define NDTR0 0x04
+#define NDTR0_TRP(x) ((min_t(unsigned int, x, 0xF) & 0x7) << 0)
+#define NDTR0_TRH(x) (min_t(unsigned int, x, 0x7) << 3)
+#define NDTR0_ETRP(x) ((min_t(unsigned int, x, 0xF) & 0x8) << 3)
+#define NDTR0_SEL_NRE_EDGE BIT(7)
+#define NDTR0_TWP(x) (min_t(unsigned int, x, 0x7) << 8)
+#define NDTR0_TWH(x) (min_t(unsigned int, x, 0x7) << 11)
+#define NDTR0_TCS(x) (min_t(unsigned int, x, 0x7) << 16)
+#define NDTR0_TCH(x) (min_t(unsigned int, x, 0x7) << 19)
+#define NDTR0_RD_CNT_DEL(x) (min_t(unsigned int, x, 0xF) << 22)
+#define NDTR0_SELCNTR BIT(26)
+#define NDTR0_TADL(x) (min_t(unsigned int, x, 0x1F) << 27)
+
+/* NAND interface timing parameter 1 register */
+#define NDTR1 0x0C
+#define NDTR1_TAR(x) (min_t(unsigned int, x, 0xF) << 0)
+#define NDTR1_TWHR(x) (min_t(unsigned int, x, 0xF) << 4)
+#define NDTR1_TRHW(x) (min_t(unsigned int, x / 16, 0x3) << 8)
+#define NDTR1_PRESCALE BIT(14)
+#define NDTR1_WAIT_MODE BIT(15)
+#define NDTR1_TR(x) (min_t(unsigned int, x, 0xFFFF) << 16)
+
+/* NAND controller status register */
+#define NDSR 0x14
+#define NDSR_WRCMDREQ BIT(0)
+#define NDSR_RDDREQ BIT(1)
+#define NDSR_WRDREQ BIT(2)
+#define NDSR_CORERR BIT(3)
+#define NDSR_UNCERR BIT(4)
+#define NDSR_CMDD(cs) BIT(8 - cs)
+#define NDSR_RDY(rb) BIT(11 + rb)
+#define NDSR_ERRCNT(x) ((x >> 16) & 0x1F)
+
+/* NAND ECC control register */
+#define NDECCCTRL 0x28
+#define NDECCCTRL_BCH_EN BIT(0)
+
+/* NAND controller data buffer register */
+#define NDDB 0x40
+
+/* NAND controller command buffer 0 register */
+#define NDCB0 0x48
+#define NDCB0_CMD1(x) ((x & 0xFF) << 0)
+#define NDCB0_CMD2(x) ((x & 0xFF) << 8)
+#define NDCB0_ADDR_CYC(x) ((x & 0x7) << 16)
+#define NDCB0_ADDR_GET_NUM_CYC(x) (((x) >> 16) & 0x7)
+#define NDCB0_DBC BIT(19)
+#define NDCB0_CMD_TYPE(x) ((x & 0x7) << 21)
+#define NDCB0_CSEL BIT(24)
+#define NDCB0_RDY_BYP BIT(27)
+#define NDCB0_LEN_OVRD BIT(28)
+#define NDCB0_CMD_XTYPE(x) ((x & 0x7) << 29)
+
+/* NAND controller command buffer 1 register */
+#define NDCB1 0x4C
+#define NDCB1_COLS(x) ((x & 0xFFFF) << 0)
+#define NDCB1_ADDRS_PAGE(x) (x << 16)
+
+/* NAND controller command buffer 2 register */
+#define NDCB2 0x50
+#define NDCB2_ADDR5_PAGE(x) (((x >> 16) & 0xFF) << 0)
+#define NDCB2_ADDR5_CYC(x) ((x & 0xFF) << 0)
+
+/* NAND controller command buffer 3 register */
+#define NDCB3 0x54
+#define NDCB3_ADDR6_CYC(x) ((x & 0xFF) << 16)
+#define NDCB3_ADDR7_CYC(x) ((x & 0xFF) << 24)
+
+/* NAND controller command buffer 0 register 'type' and 'xtype' fields */
+#define TYPE_READ 0
+#define TYPE_WRITE 1
+#define TYPE_ERASE 2
+#define TYPE_READ_ID 3
+#define TYPE_STATUS 4
+#define TYPE_RESET 5
+#define TYPE_NAKED_CMD 6
+#define TYPE_NAKED_ADDR 7
+#define TYPE_MASK 7
+#define XTYPE_MONOLITHIC_RW 0
+#define XTYPE_LAST_NAKED_RW 1
+#define XTYPE_FINAL_COMMAND 3
+#define XTYPE_READ 4
+#define XTYPE_WRITE_DISPATCH 4
+#define XTYPE_NAKED_RW 5
+#define XTYPE_COMMAND_DISPATCH 6
+#define XTYPE_MASK 7
+
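
These type/xtype values are OR'ed into NDCB0 together with the command
opcodes and the number of address cycles. For instance, the Hamming page
read implemented later in this file builds its command descriptor as
follows (taken from marvell_nfc_hw_ecc_hmg_do_read_page() below):

	struct marvell_nfc_op nfc_op = {
		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
			   NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
			   NDCB0_DBC |	/* double byte command: CMD1 + CMD2 */
			   NDCB0_CMD1(NAND_CMD_READ0) |
			   NDCB0_CMD2(NAND_CMD_READSTART),
		.ndcb[1] = NDCB1_ADDRS_PAGE(page),
		.ndcb[2] = NDCB2_ADDR5_PAGE(page),
	};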
+/**
+ * The Marvell ECC engine works differently than the others. To limit the
+ * size of the IP, hardware engineers chose to set a fixed strength of 16 bits
+ * per subpage. Depending on the desired strength needed by the NAND chip, a
+ * particular layout mixing data/spare/ecc is defined, with a possible last
+ * chunk smaller than the others.
+ *
+ * @writesize: Full page size on which the layout applies
+ * @chunk: Desired ECC chunk size on which the layout applies
+ * @strength: Desired ECC strength (per chunk size bytes) on which the
+ * layout applies
+ * @nchunks: Total number of chunks
+ * @full_chunk_cnt: Number of full-sized chunks, which is the number of
+ * repetitions of the pattern:
+ * (data_bytes + spare_bytes + ecc_bytes).
+ * @data_bytes: Number of data bytes per chunk
+ * @spare_bytes: Number of spare bytes per chunk
+ * @ecc_bytes: Number of ecc bytes per chunk
+ * @last_data_bytes: Number of data bytes in the last chunk
+ * @last_spare_bytes: Number of spare bytes in the last chunk
+ * @last_ecc_bytes: Number of ecc bytes in the last chunk
+ */
+struct marvell_hw_ecc_layout {
+ /* Constraints */
+ int writesize;
+ int chunk;
+ int strength;
+ /* Corresponding layout */
+ int nchunks;
+ int full_chunk_cnt;
+ int data_bytes;
+ int spare_bytes;
+ int ecc_bytes;
+ int last_data_bytes;
+ int last_spare_bytes;
+ int last_ecc_bytes;
+};
+
+#define MARVELL_LAYOUT(ws, dc, ds, nc, fcc, db, sb, eb, ldb, lsb, leb) \
+ { \
+ .writesize = ws, \
+ .chunk = dc, \
+ .strength = ds, \
+ .nchunks = nc, \
+ .full_chunk_cnt = fcc, \
+ .data_bytes = db, \
+ .spare_bytes = sb, \
+ .ecc_bytes = eb, \
+ .last_data_bytes = ldb, \
+ .last_spare_bytes = lsb, \
+ .last_ecc_bytes = leb, \
+ }
+
+/* Layouts explained in AN-379_Marvell_SoC_NFC_ECC */
+static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
+ MARVELL_LAYOUT( 512, 512, 1, 1, 1, 512, 8, 8, 0, 0, 0),
+ MARVELL_LAYOUT( 2048, 512, 1, 1, 1, 2048, 40, 24, 0, 0, 0),
+ MARVELL_LAYOUT( 2048, 512, 4, 1, 1, 2048, 32, 30, 0, 0, 0),
+ MARVELL_LAYOUT( 4096, 512, 4, 2, 2, 2048, 32, 30, 0, 0, 0),
+ MARVELL_LAYOUT( 4096, 512, 8, 5, 4, 1024, 0, 30, 0, 64, 30),
+};
+
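A chip is matched against this table using the constraint fields only. A
hypothetical lookup helper (for illustration; the actual selection logic
lives in the ECC init code, not shown here) could be:

static const struct marvell_hw_ecc_layout *
marvell_nand_layout_find(int writesize, int chunk, int strength)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(marvell_nfc_layouts); i++) {
		const struct marvell_hw_ecc_layout *l =
			&marvell_nfc_layouts[i];

		/* All three constraints must match an entry exactly */
		if (l->writesize == writesize && l->chunk == chunk &&
		    l->strength == strength)
			return l;
	}

	return NULL;	/* Unsupported page size/strength combination */
}
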
+/**
+ * The NAND flash controller has up to 4 CE and 2 RB pins. The CE selection
+ * is made by a field in the NDCB0 register and by another field in the NDCB2
+ * register. The datasheet describes the logic with an error: the ADDR5 field
+ * is declared once at the beginning of NDCB2 and another time at its end.
+ * Because the ADDR5 field of NDCB2 may be used by other bytes, it is more
+ * logical to use the last bits of this field instead of the first ones.
+ *
+ * @cs: Wanted CE lane.
+ * @ndcb0_csel: Value of the NDCB0 register with or without the flag
+ * selecting the wanted CE lane. This is set once when
+ * the Device Tree is probed.
+ * @rb: Ready/Busy pin for the flash chip
+ */
+struct marvell_nand_chip_sel {
+ unsigned int cs;
+ u32 ndcb0_csel;
+ unsigned int rb;
+};
+
+/**
+ * NAND chip structure: stores NAND chip device related information
+ *
+ * @chip: Base NAND chip structure
+ * @node: Used to store NAND chips into a list
+ * @layout: NAND layout when using hardware ECC
+ * @ndcr: Controller register value for this NAND chip
+ * @ndtr0: Timing registers 0 value for this NAND chip
+ * @ndtr1: Timing registers 1 value for this NAND chip
+ * @addr_cyc: Number of address cycles needed to access a page
+ * @selected_die: Current active CS
+ * @nsels: Number of CS lines required by the NAND chip
+ * @sels: Array of CS lines descriptions
+ */
+struct marvell_nand_chip {
+ struct nand_chip chip;
+ struct list_head node;
+ const struct marvell_hw_ecc_layout *layout;
+ u32 ndcr;
+ u32 ndtr0;
+ u32 ndtr1;
+ int addr_cyc;
+ int selected_die;
+ unsigned int nsels;
+ struct marvell_nand_chip_sel sels[0];
+};
+
+static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct marvell_nand_chip, chip);
+}
+
+static inline struct marvell_nand_chip_sel *to_nand_sel(struct marvell_nand_chip
+ *nand)
+{
+ return &nand->sels[nand->selected_die];
+}
+
+/**
+ * NAND controller capabilities for distinction between compatible strings
+ *
+ * @max_cs_nb: Number of Chip Select lines available
+ * @max_rb_nb: Number of Ready/Busy lines available
+ * @need_system_controller: Indicates if the SoC needs to have access to the
+ * system controller (i.e. to enable the NAND controller)
+ * @legacy_of_bindings: Indicates if DT parsing must be done using the
+ * old-fashioned way
+ * @is_nfcv2: NFCv2 has numerous enhancements compared to NFCv1, e.g.
+ * BCH error detection and correction algorithm,
+ * NDCB3 register has been added
+ * @use_dma: Use dma for data transfers
+ */
+struct marvell_nfc_caps {
+ unsigned int max_cs_nb;
+ unsigned int max_rb_nb;
+ bool need_system_controller;
+ bool legacy_of_bindings;
+ bool is_nfcv2;
+ bool use_dma;
+};
+
+/**
+ * NAND controller structure: stores Marvell NAND controller information
+ *
+ * @controller: Base controller structure
+ * @dev: Parent device (used to print error messages)
+ * @regs: NAND controller registers
+ * @ecc_clk: ECC block clock, two times the NAND controller clock
+ * @complete: Completion object to wait for NAND controller events
+ * @assigned_cs: Bitmask describing already assigned CS lines
+ * @chips: List containing all the NAND chips attached to
+ * this NAND controller
+ * @selected_chip: Currently selected target chip
+ * @caps: NAND controller capabilities for each compatible string
+ * @use_dma: Whether to use DMA for data transfers (NFCv1 only)
+ * @dma_chan: DMA channel (NFCv1 only)
+ * @dma_buf: 32-bit aligned buffer for DMA transfers (NFCv1 only)
+ */
+struct marvell_nfc {
+ struct nand_hw_control controller;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *ecc_clk;
+ struct completion complete;
+ unsigned long assigned_cs;
+ struct list_head chips;
+ struct nand_chip *selected_chip;
+ const struct marvell_nfc_caps *caps;
+
+ /* DMA (NFCv1 only) */
+ bool use_dma;
+ struct dma_chan *dma_chan;
+ u8 *dma_buf;
+};
+
+static inline struct marvell_nfc *to_marvell_nfc(struct nand_hw_control *ctrl)
+{
+ return container_of(ctrl, struct marvell_nfc, controller);
+}
+
+/**
+ * NAND controller timings expressed in NAND Controller clock cycles
+ *
+ * @tRP: ND_nRE pulse width
+ * @tRH: ND_nRE high duration
+ * @tWP: ND_nWE pulse time
+ * @tWH: ND_nWE high duration
+ * @tCS: Enable signal setup time
+ * @tCH: Enable signal hold time
+ * @tADL: Address to write data delay
+ * @tAR: ND_ALE low to ND_nRE low delay
+ * @tWHR: ND_nWE high to ND_nRE low for status read
+ * @tRHW: ND_nRE high duration, read to write delay
+ * @tR: ND_nWE high to ND_nRE low for read
+ */
+struct marvell_nfc_timings {
+ /* NDTR0 fields */
+ unsigned int tRP;
+ unsigned int tRH;
+ unsigned int tWP;
+ unsigned int tWH;
+ unsigned int tCS;
+ unsigned int tCH;
+ unsigned int tADL;
+ /* NDTR1 fields */
+ unsigned int tAR;
+ unsigned int tWHR;
+ unsigned int tRHW;
+ unsigned int tR;
+};
+
+/**
+ * Derives a duration in number of clock cycles.
+ *
+ * @ps: Duration in picoseconds
+ * @period_ns: Clock period in nanoseconds
+ *
+ * Convert the duration to nanoseconds, then divide by the period and
+ * round up to get the number of clock periods.
+ */
+#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP(ps / 1000, period_ns))
+
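As a worked example, with a 100 MHz controller clock (period_ns = 10) and a
requested tRP of 25 ns (ps = 25000): 25000 / 1000 = 25 ns, and
DIV_ROUND_UP(25, 10) = 3, so TO_CYCLES(25000, 10) yields 3 clock cycles.
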
+/**
+ * NAND driver structure filled during the parsing of the subop subset of
+ * instructions handed over by the ->exec_op() hook.
+ *
+ * @ndcb: Array of values written to NDCBx registers
+ * @cle_ale_delay_ns: Optional delay after the last CMD or ADDR cycle
+ * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
+ * @rdy_delay_ns: Optional delay after waiting for the RB pin
+ * @data_delay_ns: Optional delay after the data xfer
+ * @data_instr_idx: Index of the data instruction in the subop
+ * @data_instr: Pointer to the data instruction in the subop
+ */
+struct marvell_nfc_op {
+ u32 ndcb[4];
+ unsigned int cle_ale_delay_ns;
+ unsigned int rdy_timeout_ms;
+ unsigned int rdy_delay_ns;
+ unsigned int data_delay_ns;
+ unsigned int data_instr_idx;
+ const struct nand_op_instr *data_instr;
+};
+
+/*
+ * Internal helper to conditionally apply a delay (from the above structure,
+ * most of the time).
+ */
+static void cond_delay(unsigned int ns)
+{
+ if (!ns)
+ return;
+
+ if (ns < 10000)
+ ndelay(ns);
+ else
+ udelay(DIV_ROUND_UP(ns, 1000));
+}
+
+/*
+ * The controller has many flags that could generate interrupts, most of them
+ * are disabled and polling is used. For the very slow signals, using interrupts
+ * may reduce the CPU load.
+ */
+static void marvell_nfc_disable_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ u32 reg;
+
+ /* Writing 1 disables the interrupt */
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg | int_mask, nfc->regs + NDCR);
+}
+
+static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ u32 reg;
+
+ /* Writing 0 enables the interrupt */
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
+}
+
+static void marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ writel_relaxed(int_mask, nfc->regs + NDSR);
+}
+
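These three helpers are meant to be used together around a sleeping wait, as
marvell_nfc_wait_op() does further down. The canonical sequence, condensed
from that function:

	init_completion(&nfc->complete);

	/* Arm the interrupt, sleep until the ISR completes us, disarm */
	marvell_nfc_enable_int(nfc, NDCR_RDYM);
	ret = wait_for_completion_timeout(&nfc->complete,
					  msecs_to_jiffies(timeout_ms));
	marvell_nfc_disable_int(nfc, NDCR_RDYM);

	/* Both RDY status bits may be set, clear them in one go */
	marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
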
+static void marvell_nfc_force_byte_access(struct nand_chip *chip,
+ bool force_8bit)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr;
+
+ /*
+ * Callers of this function do not verify if the NAND is using a 16-bit
+ * or an 8-bit bus for normal operations, so we need to take care of that
+ * here by leaving the configuration unchanged if the NAND does not have
+ * the NAND_BUSWIDTH_16 flag set.
+ */
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ return;
+
+ ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (force_8bit)
+ ndcr &= ~(NDCR_DWIDTH_M | NDCR_DWIDTH_C);
+ else
+ ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
+
+ writel_relaxed(ndcr, nfc->regs + NDCR);
+}
+
+static int marvell_nfc_wait_ndrun(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 val;
+ int ret;
+
+ /*
+ * The command is being processed, wait for the ND_RUN bit to be
+ * cleared by the NFC. If not, we must clear it by hand.
+ */
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDCR, val,
+ (val & NDCR_ND_RUN) == 0,
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout on NAND controller run mode\n");
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Any time a command has to be sent to the controller, the following sequence
+ * has to be followed:
+ * - call marvell_nfc_prepare_cmd()
+ * -> activate the ND_RUN bit that will kind of 'start a job'
+ * -> wait for the signal indicating the NFC is waiting for a command
+ * - send the command (cmd and address cycles)
+ * - possibly send or receive the data
+ * - call marvell_nfc_end_cmd() with the corresponding flag
+ * -> wait for the flag to be triggered or cancel the job with a timeout
+ *
+ * The following helpers are here to factorize the code a bit so that
+ * specialized functions responsible for executing the actual NAND
+ * operations do not have to replicate the same code blocks.
+ */
+static int marvell_nfc_prepare_cmd(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr, val;
+ int ret;
+
+ /* Poll ND_RUN and clear NDSR before issuing any command */
+ ret = marvell_nfc_wait_ndrun(chip);
+ if (ret) {
+ dev_err(nfc->dev, "Last operation did not succeed\n");
+ return ret;
+ }
+
+ ndcr = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(readl(nfc->regs + NDSR), nfc->regs + NDSR);
+
+ /* Assert ND_RUN bit and wait the NFC to be ready */
+ writel_relaxed(ndcr | NDCR_ND_RUN, nfc->regs + NDCR);
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
+ val & NDSR_WRCMDREQ,
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout on WRCMDRE\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Command may be written, clear WRCMDREQ status bit */
+ writel_relaxed(NDSR_WRCMDREQ, nfc->regs + NDSR);
+
+ return 0;
+}
+
+static void marvell_nfc_send_cmd(struct nand_chip *chip,
+ struct marvell_nfc_op *nfc_op)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ dev_dbg(nfc->dev, "\nNDCR: 0x%08x\n"
+ "NDCB0: 0x%08x\nNDCB1: 0x%08x\nNDCB2: 0x%08x\nNDCB3: 0x%08x\n",
+ (u32)readl_relaxed(nfc->regs + NDCR), nfc_op->ndcb[0],
+ nfc_op->ndcb[1], nfc_op->ndcb[2], nfc_op->ndcb[3]);
+
+ writel_relaxed(to_nand_sel(marvell_nand)->ndcb0_csel | nfc_op->ndcb[0],
+ nfc->regs + NDCB0);
+ writel_relaxed(nfc_op->ndcb[1], nfc->regs + NDCB0);
+ writel(nfc_op->ndcb[2], nfc->regs + NDCB0);
+
+ /*
+ * Write NDCB0 four times only if LEN_OVRD is set or if ADDR6 or ADDR7
+ * fields are used (only available on NFCv2).
+ */
+ if (nfc_op->ndcb[0] & NDCB0_LEN_OVRD ||
+ NDCB0_ADDR_GET_NUM_CYC(nfc_op->ndcb[0]) >= 6) {
+ if (!WARN_ON_ONCE(!nfc->caps->is_nfcv2))
+ writel(nfc_op->ndcb[3], nfc->regs + NDCB0);
+ }
+}
+
+static int marvell_nfc_end_cmd(struct nand_chip *chip, int flag,
+ const char *label)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 val;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
+ val & flag,
+ POLL_PERIOD, POLL_TIMEOUT);
+
+ if (ret) {
+ dev_err(nfc->dev, "Timeout on %s (NDSR: 0x%08x)\n",
+ label, val);
+ if (nfc->dma_chan)
+ dmaengine_terminate_all(nfc->dma_chan);
+ return ret;
+ }
+
+ /*
+ * The DMA function uses this helper to poll on CMDD bits without
+ * clearing them.
+ */
+ if (nfc->use_dma && (readl_relaxed(nfc->regs + NDCR) & NDCR_DMA_EN))
+ return 0;
+
+ writel_relaxed(flag, nfc->regs + NDSR);
+
+ return 0;
+}
+
+static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ int cs_flag = NDSR_CMDD(to_nand_sel(marvell_nand)->ndcb0_csel);
+
+ return marvell_nfc_end_cmd(chip, cs_flag, "CMDD");
+}
+
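Putting the helpers together, every accessor below follows the same
transaction shape. A condensed sketch, with error handling trimmed:

	ret = marvell_nfc_prepare_cmd(chip);	/* arm ND_RUN, wait WRCMDREQ */
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);	/* push NDCB0..NDCB3 */
	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
				  "RDDREQ while draining FIFO");

	/* ... drain or fill the NDDB data FIFO here ... */

	ret = marvell_nfc_wait_cmdd(chip);	/* wait for the CMDD flag */
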
+static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ int ret;
+
+ /* Timeout is expressed in ms */
+ if (!timeout_ms)
+ timeout_ms = IRQ_TIMEOUT;
+
+ init_completion(&nfc->complete);
+
+ marvell_nfc_enable_int(nfc, NDCR_RDYM);
+ ret = wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(timeout_ms));
+ marvell_nfc_disable_int(nfc, NDCR_RDYM);
+ marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
+ if (!ret) {
+ dev_err(nfc->dev, "Timeout waiting for RB signal\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void marvell_nfc_select_chip(struct mtd_info *mtd, int die_nr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr_generic;
+
+ if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
+ return;
+
+ if (die_nr < 0 || die_nr >= marvell_nand->nsels) {
+ nfc->selected_chip = NULL;
+ marvell_nand->selected_die = -1;
+ return;
+ }
+
+ /*
+ * Do not change the timing registers when using the DT property
+ * marvell,nand-keep-config; in that case ->ndtr0 and ->ndtr1 from the
+ * marvell_nand structure are supposedly empty.
+ */
+ writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
+ writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
+
+ /*
+ * Reset the NDCR register to a clean state for this particular chip,
+ * also clear ND_RUN bit.
+ */
+ ndcr_generic = readl_relaxed(nfc->regs + NDCR) &
+ NDCR_GENERIC_FIELDS_MASK & ~NDCR_ND_RUN;
+ writel_relaxed(ndcr_generic | marvell_nand->ndcr, nfc->regs + NDCR);
+
+ /* Also reset the interrupt status register */
+ marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
+
+ nfc->selected_chip = chip;
+ marvell_nand->selected_die = die_nr;
+}
+
+static irqreturn_t marvell_nfc_isr(int irq, void *dev_id)
+{
+ struct marvell_nfc *nfc = dev_id;
+ u32 st = readl_relaxed(nfc->regs + NDSR);
+ u32 ien = (~readl_relaxed(nfc->regs + NDCR)) & NDCR_ALL_INT;
+
+ /*
+ * RDY interrupt mask is one bit in NDCR while there are two status
+ * bits in NDSR (RDY[cs0/cs2] and RDY[cs1/cs3]).
+ */
+ if (st & NDSR_RDY(1))
+ st |= NDSR_RDY(0);
+
+ if (!(st & ien))
+ return IRQ_NONE;
+
+ marvell_nfc_disable_int(nfc, st & NDCR_ALL_INT);
+
+ if (!(st & (NDSR_RDDREQ | NDSR_WRDREQ | NDSR_WRCMDREQ)))
+ complete(&nfc->complete);
+
+ return IRQ_HANDLED;
+}
+
+/* HW ECC related functions */
+static void marvell_nfc_enable_hw_ecc(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (!(ndcr & NDCR_ECC_EN)) {
+ writel_relaxed(ndcr | NDCR_ECC_EN, nfc->regs + NDCR);
+
+ /*
+ * When enabling BCH, set threshold to 0 to always know the
+ * number of corrected bitflips.
+ */
+ if (chip->ecc.algo == NAND_ECC_BCH)
+ writel_relaxed(NDECCCTRL_BCH_EN, nfc->regs + NDECCCTRL);
+ }
+}
+
+static void marvell_nfc_disable_hw_ecc(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (ndcr & NDCR_ECC_EN) {
+ writel_relaxed(ndcr & ~NDCR_ECC_EN, nfc->regs + NDCR);
+ if (chip->ecc.algo == NAND_ECC_BCH)
+ writel_relaxed(0, nfc->regs + NDECCCTRL);
+ }
+}
+
+/* DMA related helpers */
+static void marvell_nfc_enable_dma(struct marvell_nfc *nfc)
+{
+ u32 reg;
+
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg | NDCR_DMA_EN, nfc->regs + NDCR);
+}
+
+static void marvell_nfc_disable_dma(struct marvell_nfc *nfc)
+{
+ u32 reg;
+
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg & ~NDCR_DMA_EN, nfc->regs + NDCR);
+}
+
+/* Read/write PIO/DMA accessors */
+static int marvell_nfc_xfer_data_dma(struct marvell_nfc *nfc,
+ enum dma_data_direction direction,
+ unsigned int len)
+{
+ unsigned int dma_len = min_t(int, ALIGN(len, 32), MAX_CHUNK_SIZE);
+ struct dma_async_tx_descriptor *tx;
+ struct scatterlist sg;
+ dma_cookie_t cookie;
+ int ret;
+
+ marvell_nfc_enable_dma(nfc);
+ /* Prepare the DMA transfer */
+ sg_init_one(&sg, nfc->dma_buf, dma_len);
+ dma_map_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
+ tx = dmaengine_prep_slave_sg(nfc->dma_chan, &sg, 1,
+ direction == DMA_FROM_DEVICE ?
+ DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(nfc->dev, "Could not prepare DMA S/G list\n");
+ return -ENXIO;
+ }
+
+ /* Do the task and wait for it to finish */
+ cookie = dmaengine_submit(tx);
+ ret = dma_submit_error(cookie);
+ if (ret)
+ return -EIO;
+
+ dma_async_issue_pending(nfc->dma_chan);
+ ret = marvell_nfc_wait_cmdd(nfc->selected_chip);
+ dma_unmap_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
+ marvell_nfc_disable_dma(nfc);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout waiting for DMA (status: %d)\n",
+ dmaengine_tx_status(nfc->dma_chan, cookie, NULL));
+ dmaengine_terminate_all(nfc->dma_chan);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_xfer_data_in_pio(struct marvell_nfc *nfc, u8 *in,
+ unsigned int len)
+{
+ unsigned int last_len = len % FIFO_DEPTH;
+ unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
+ int i;
+
+ for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
+ ioread32_rep(nfc->regs + NDDB, in + i, FIFO_REP(FIFO_DEPTH));
+
+ if (last_len) {
+ u8 tmp_buf[FIFO_DEPTH];
+
+ ioread32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
+ memcpy(in + last_full_offset, tmp_buf, last_len);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_xfer_data_out_pio(struct marvell_nfc *nfc, const u8 *out,
+ unsigned int len)
+{
+ unsigned int last_len = len % FIFO_DEPTH;
+ unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
+ int i;
+
+ for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
+ iowrite32_rep(nfc->regs + NDDB, out + i, FIFO_REP(FIFO_DEPTH));
+
+ if (last_len) {
+ u8 tmp_buf[FIFO_DEPTH];
+
+ memcpy(tmp_buf, out + last_full_offset, last_len);
+ iowrite32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
+ }
+
+ return 0;
+}
+
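Both PIO helpers rely on the same padding trick: the NDDB FIFO only moves
FIFO_DEPTH (8) bytes at a time, so a length that is not a multiple of 8 is
completed through a small bounce buffer. For example, writing 13 bytes
issues one full 8-byte burst straight from the source buffer, then copies
the remaining 5 bytes into tmp_buf and pushes one more 8-byte burst (the 3
trailing bytes of tmp_buf are uninitialized padding); reads work the same
way in reverse, with the extra bytes simply discarded.
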
+static void marvell_nfc_check_empty_chunk(struct nand_chip *chip,
+ u8 *data, int data_len,
+ u8 *spare, int spare_len,
+ u8 *ecc, int ecc_len,
+ unsigned int *max_bitflips)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int bf;
+
+ /*
+ * Blank pages (all 0xFF) that have not been written may be recognized
+ * as bad if bitflips occur, so whenever an uncorrectable error occurs,
+ * check if the entire page (with ECC bytes) is actually blank or not.
+ */
+ if (!data)
+ data_len = 0;
+ if (!spare)
+ spare_len = 0;
+ if (!ecc)
+ ecc_len = 0;
+
+ bf = nand_check_erased_ecc_chunk(data, data_len, ecc, ecc_len,
+ spare, spare_len, chip->ecc.strength);
+ if (bf < 0) {
+ mtd->ecc_stats.failed++;
+ return;
+ }
+
+ /* Update the stats and max_bitflips */
+ mtd->ecc_stats.corrected += bf;
+ *max_bitflips = max_t(unsigned int, *max_bitflips, bf);
+}
+
+/*
+ * Check whether a chunk is correct or not according to the hardware ECC
+ * engine. mtd->ecc_stats.corrected is updated, as well as max_bitflips;
+ * however, mtd->ecc_stats.failed is not: the function instead returns a
+ * non-zero value indicating that a check on the emptiness of the subpage
+ * must be performed before declaring the subpage corrupted.
+ */
+static int marvell_nfc_hw_ecc_correct(struct nand_chip *chip,
+ unsigned int *max_bitflips)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ int bf = 0;
+ u32 ndsr;
+
+ ndsr = readl_relaxed(nfc->regs + NDSR);
+
+ /* Check uncorrectable error flag */
+ if (ndsr & NDSR_UNCERR) {
+ writel_relaxed(ndsr, nfc->regs + NDSR);
+
+ /*
+ * Do not increment ->ecc_stats.failed now; instead, return a
+ * non-zero value to indicate that this chunk was apparently
+ * bad, and it should be checked to see if it is empty or not.
+ * If the chunk (with ECC bytes) is not declared empty, the
+ * calling function must increment the failure count.
+ */
+ return -EBADMSG;
+ }
+
+ /* Check correctable error flag */
+ if (ndsr & NDSR_CORERR) {
+ writel_relaxed(ndsr, nfc->regs + NDSR);
+
+ if (chip->ecc.algo == NAND_ECC_BCH)
+ bf = NDSR_ERRCNT(ndsr);
+ else
+ bf = 1;
+ }
+
+ /* Update the stats and max_bitflips */
+ mtd->ecc_stats.corrected += bf;
+ *max_bitflips = max_t(unsigned int, *max_bitflips, bf);
+
+ return 0;
+}
+
+/* Hamming read helpers */
+static int marvell_nfc_hw_ecc_hmg_do_read_page(struct nand_chip *chip,
+ u8 *data_buf, u8 *oob_buf,
+ bool raw, int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_DBC |
+ NDCB0_CMD1(NAND_CMD_READ0) |
+ NDCB0_CMD2(NAND_CMD_READSTART),
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ };
+ unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+ int ret;
+
+ /* NFCv2 needs more information about the operation being executed */
+ if (nfc->caps->is_nfcv2)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (data/oob)");
+ if (ret)
+ return ret;
+
+ /*
+ * Read the page then the OOB area. Unlike what is shown in current
+ * documentation, spare bytes are protected by the ECC engine, and must
+ * be at the beginning of the OOB area, or running this driver on legacy
+ * systems will prevent the discovery of the BBM/BBT.
+ */
+ if (nfc->use_dma) {
+ marvell_nfc_xfer_data_dma(nfc, DMA_FROM_DEVICE,
+ lt->data_bytes + oob_bytes);
+ memcpy(data_buf, nfc->dma_buf, lt->data_bytes);
+ memcpy(oob_buf, nfc->dma_buf + lt->data_bytes, oob_bytes);
+ } else {
+ marvell_nfc_xfer_data_in_pio(nfc, data_buf, lt->data_bytes);
+ marvell_nfc_xfer_data_in_pio(nfc, oob_buf, oob_bytes);
+ }
+
+ ret = marvell_nfc_wait_cmdd(chip);
+
+ return ret;
+}
+
+static int marvell_nfc_hw_ecc_hmg_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
+ true, page);
+}
+
+static int marvell_nfc_hw_ecc_hmg_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u8 *buf, int oob_required,
+ int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ unsigned int full_sz = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int max_bitflips = 0, ret;
+ u8 *raw_buf;
+
+ marvell_nfc_enable_hw_ecc(chip);
+ marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false,
+ page);
+ ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (!ret)
+ return max_bitflips;
+
+ /*
+ * When ECC failures are detected, check if the full page has been
+ * written or not. Ignore the failure if it is actually empty.
+ */
+ raw_buf = kmalloc(full_sz, GFP_KERNEL);
+ if (!raw_buf)
+ return -ENOMEM;
+
+ marvell_nfc_hw_ecc_hmg_do_read_page(chip, raw_buf, raw_buf +
+ lt->data_bytes, true, page);
+ marvell_nfc_check_empty_chunk(chip, raw_buf, full_sz, NULL, 0, NULL, 0,
+ &max_bitflips);
+ kfree(raw_buf);
+
+ return max_bitflips;
+}
+
+/*
+ * The spare area in Hamming layouts is not protected by the ECC engine (even
+ * if it appears before the ECC bytes when reading), so the ->read_oob_raw()
+ * function also stands for ->read_oob().
+ */
+static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ return marvell_nfc_hw_ecc_hmg_do_read_page(chip, chip->data_buf,
+ chip->oob_poi, true, page);
+}
+
+/* Hamming write helpers */
+static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
+ const u8 *data_buf,
+ const u8 *oob_buf, bool raw,
+ int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_CMD1(NAND_CMD_SEQIN) |
+ NDCB0_CMD2(NAND_CMD_PAGEPROG) |
+ NDCB0_DBC,
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ };
+ unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+ int ret;
+
+ /* NFCv2 needs more information about the operation being executed */
+ if (nfc->caps->is_nfcv2)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
+ "WRDREQ while loading FIFO (data)");
+ if (ret)
+ return ret;
+
+ /* Write the page then the OOB area */
+ if (nfc->use_dma) {
+ memcpy(nfc->dma_buf, data_buf, lt->data_bytes);
+ memcpy(nfc->dma_buf + lt->data_bytes, oob_buf, oob_bytes);
+ marvell_nfc_xfer_data_dma(nfc, DMA_TO_DEVICE, lt->data_bytes +
+ lt->ecc_bytes + lt->spare_bytes);
+ } else {
+ marvell_nfc_xfer_data_out_pio(nfc, data_buf, lt->data_bytes);
+ marvell_nfc_xfer_data_out_pio(nfc, oob_buf, oob_bytes);
+ }
+
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ ret = marvell_nfc_wait_op(chip,
+ chip->data_interface.timings.sdr.tPROG_max);
+ return ret;
+}
+
+static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
+ true, page);
+}
+
+static int marvell_nfc_hw_ecc_hmg_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ marvell_nfc_enable_hw_ecc(chip);
+ ret = marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
+ false, page);
+ marvell_nfc_disable_hw_ecc(chip);
+
+ return ret;
+}
+
+/*
+ * The spare area in Hamming layouts is not protected by the ECC engine (even
+ * if it appears before the ECC bytes when reading), so the ->write_oob_raw()
+ * function also stands for ->write_oob().
+ */
+static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ memset(chip->data_buf, 0xFF, mtd->writesize);
+
+ return marvell_nfc_hw_ecc_hmg_do_write_page(chip, chip->data_buf,
+ chip->oob_poi, true, page);
+}
+
+/* BCH read helpers */
+static int marvell_nfc_hw_ecc_bch_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ u8 *oob = chip->oob_poi;
+ int chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int ecc_len = lt->ecc_bytes;
+ int chunk;
+
+ if (oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ /* Update last chunk length */
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ ecc_len = lt->last_ecc_bytes;
+ }
+
+ /* Read data bytes */
+ nand_change_read_column_op(chip, chunk * chunk_size,
+ buf + (lt->data_bytes * chunk),
+ data_len, false);
+
+ /* Read spare bytes */
+ nand_read_data_op(chip, oob + (lt->spare_bytes * chunk),
+ spare_len, false);
+
+ /* Read ECC bytes */
+ nand_read_data_op(chip, oob + ecc_offset +
+ (ALIGN(lt->ecc_bytes, 32) * chunk),
+ ecc_len, false);
+ }
+
+ return 0;
+}
+
+static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
+ u8 *data, unsigned int data_len,
+ u8 *spare, unsigned int spare_len,
+ int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int i, ret;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_LEN_OVRD,
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ .ndcb[3] = data_len + spare_len,
+ };
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return;
+
+ if (chunk == 0)
+ nfc_op.ndcb[0] |= NDCB0_DBC |
+ NDCB0_CMD1(NAND_CMD_READ0) |
+ NDCB0_CMD2(NAND_CMD_READSTART);
+
+ /*
+ * Trigger the naked read operation only on the last chunk.
+ * Otherwise, use monolithic read.
+ */
+ if (lt->nchunks == 1 || (chunk < lt->nchunks - 1))
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+ else
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+
+ /*
+ * According to the datasheet, when reading from NDDB
+ * with BCH enabled, after each 32-byte read, we have
+ * to make sure that the NDSR.RDDREQ bit is set.
+ *
+ * Drain the FIFO, 8 32-bit reads at a time, and skip
+ * the polling on the last read.
+ *
+ * Length is a multiple of 32 bytes, hence it is a multiple of 8 too.
+ */
+ for (i = 0; i < data_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
+ marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (data)");
+ marvell_nfc_xfer_data_in_pio(nfc, data,
+ FIFO_DEPTH * BCH_SEQ_READS);
+ data += FIFO_DEPTH * BCH_SEQ_READS;
+ }
+
+ for (i = 0; i < spare_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
+ marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (OOB)");
+ marvell_nfc_xfer_data_in_pio(nfc, spare,
+ FIFO_DEPTH * BCH_SEQ_READS);
+ spare += FIFO_DEPTH * BCH_SEQ_READS;
+ }
+}
+
+static int marvell_nfc_hw_ecc_bch_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u8 *buf, int oob_required,
+ int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int data_len = lt->data_bytes, spare_len = lt->spare_bytes, ecc_len;
+ u8 *data = buf, *spare = chip->oob_poi, *ecc;
+ int max_bitflips = 0;
+ u32 failure_mask = 0;
+ int chunk, ecc_offset_in_page, ret;
+
+ /*
+ * With BCH, the OOB is not fully used (and thus not entirely read);
+ * unexpected bytes could show up at the end of the OOB buffer if it is
+ * not explicitly erased.
+ */
+ if (oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ marvell_nfc_enable_hw_ecc(chip);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ /* Update length for the last chunk */
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ }
+
+ /* Read the chunk and detect number of bitflips */
+ marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len,
+ spare, spare_len, page);
+ ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
+ if (ret)
+ failure_mask |= BIT(chunk);
+
+ data += data_len;
+ spare += spare_len;
+ }
+
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (!failure_mask)
+ return max_bitflips;
+
+ /*
+ * Please note that dumping the ECC bytes during a normal read with OOB
+ * area would add a significant overhead as ECC bytes are "consumed" by
+ * the controller in normal mode and must be re-read in raw mode. To
+ * avoid degrading performance, we prefer not to include them. The
+ * user should re-read the page in raw mode if ECC bytes are required.
+ *
+ * However, for any subpage read error reported by ->correct(), the ECC
+ * bytes must be read in raw mode and the full subpage must be checked
+ * to see if it is entirely empty or if there was an actual error.
+ */
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ /* No failure reported for this chunk, move to the next one */
+ if (!(failure_mask & BIT(chunk)))
+ continue;
+
+ /* Derive ECC bytes positions (in page/buffer) and length */
+ ecc = chip->oob_poi +
+ (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes +
+ (chunk * ALIGN(lt->ecc_bytes, 32));
+ ecc_offset_in_page =
+ (chunk * (lt->data_bytes + lt->spare_bytes +
+ lt->ecc_bytes)) +
+ (chunk < lt->full_chunk_cnt ?
+ lt->data_bytes + lt->spare_bytes :
+ lt->last_data_bytes + lt->last_spare_bytes);
+ ecc_len = chunk < lt->full_chunk_cnt ?
+ lt->ecc_bytes : lt->last_ecc_bytes;
+
+ /* Do the actual raw read of the ECC bytes */
+ nand_change_read_column_op(chip, ecc_offset_in_page,
+ ecc, ecc_len, false);
+
+ /* Derive data/spare bytes positions (in buffer) and length */
+ data = buf + (chunk * lt->data_bytes);
+ data_len = chunk < lt->full_chunk_cnt ?
+ lt->data_bytes : lt->last_data_bytes;
+ spare = chip->oob_poi + (chunk * (lt->spare_bytes +
+ lt->ecc_bytes));
+ spare_len = chunk < lt->full_chunk_cnt ?
+ lt->spare_bytes : lt->last_spare_bytes;
+
+ /* Check the entire chunk (data + spare + ecc) for emptiness */
+ marvell_nfc_check_empty_chunk(chip, data, data_len, spare,
+ spare_len, ecc, ecc_len,
+ &max_bitflips);
+ }
+
+ return max_bitflips;
+}
+
+static int marvell_nfc_hw_ecc_bch_read_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ return chip->ecc.read_page_raw(mtd, chip, chip->data_buf, true, page);
+}
+
+static int marvell_nfc_hw_ecc_bch_read_oob(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ return chip->ecc.read_page(mtd, chip, chip->data_buf, true, page);
+}
+
+/* BCH write helpers */
+static int marvell_nfc_hw_ecc_bch_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int full_chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int ecc_len = lt->ecc_bytes;
+ int spare_offset = 0;
+ int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes;
+ int chunk;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ ecc_len = lt->last_ecc_bytes;
+ }
+
+ /* Point to the column of the next chunk */
+ nand_change_write_column_op(chip, chunk * full_chunk_size,
+ NULL, 0, false);
+
+ /* Write the data */
+ nand_write_data_op(chip, buf + (chunk * lt->data_bytes),
+ data_len, false);
+
+ if (!oob_required)
+ continue;
+
+ /* Write the spare bytes */
+ if (spare_len)
+ nand_write_data_op(chip, chip->oob_poi + spare_offset,
+ spare_len, false);
+
+ /* Write the ECC bytes */
+ if (ecc_len)
+ nand_write_data_op(chip, chip->oob_poi + ecc_offset,
+ ecc_len, false);
+
+ spare_offset += spare_len;
+ ecc_offset += ALIGN(ecc_len, 32);
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int
+marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
+ const u8 *data, unsigned int data_len,
+ const u8 *spare, unsigned int spare_len,
+ int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int ret;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
+ .ndcb[3] = data_len + spare_len,
+ };
+
+ /*
+ * The first operation dispatches the CMD_SEQIN command, issues the address
+ * cycles and asks for the first chunk of data.
+ * All operations in the middle (if any) will issue a naked write and
+ * also ask for data.
+ * The last operation (if any) asks for the last chunk of data through a
+ * last naked write.
+ */
+ if (chunk == 0) {
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_WRITE_DISPATCH) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_CMD1(NAND_CMD_SEQIN);
+ nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
+ nfc_op.ndcb[2] |= NDCB2_ADDR5_PAGE(page);
+ } else if (chunk < lt->nchunks - 1) {
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
+ } else {
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ }
+
+ /* Always dispatch the PAGEPROG command on the last chunk */
+ if (chunk == lt->nchunks - 1)
+ nfc_op.ndcb[0] |= NDCB0_CMD2(NAND_CMD_PAGEPROG) | NDCB0_DBC;
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
+ "WRDREQ while loading FIFO (data)");
+ if (ret)
+ return ret;
+
+ /* Transfer the contents */
+ iowrite32_rep(nfc->regs + NDDB, data, FIFO_REP(data_len));
+ iowrite32_rep(nfc->regs + NDDB, spare, FIFO_REP(spare_len));
+
+ return 0;
+}
+
+static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ const u8 *data = buf;
+ const u8 *spare = chip->oob_poi;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int chunk, ret;
+
+ /* Spare data will be written anyway, so clear it to avoid garbage */
+ if (!oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ marvell_nfc_enable_hw_ecc(chip);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ }
+
+ marvell_nfc_hw_ecc_bch_write_chunk(chip, chunk, data, data_len,
+ spare, spare_len, page);
+ data += data_len;
+ spare += spare_len;
+
+ /*
+ * Waiting only for CMDD or PAGED is not enough, as the ECC
+ * bytes may still be partially written. No flag is set once the
+ * operation is really finished, but the ND_RUN bit is cleared,
+ * so wait for it before stepping into the next command.
+ */
+ marvell_nfc_wait_ndrun(chip);
+ }
+
+ ret = marvell_nfc_wait_op(chip,
+ chip->data_interface.timings.sdr.tPROG_max);
+
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int marvell_nfc_hw_ecc_bch_write_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ memset(chip->data_buf, 0xFF, mtd->writesize);
+
+ return chip->ecc.write_page_raw(mtd, chip, chip->data_buf, true, page);
+}
+
+static int marvell_nfc_hw_ecc_bch_write_oob(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ memset(chip->data_buf, 0xFF, mtd->writesize);
+
+ return chip->ecc.write_page(mtd, chip, chip->data_buf, true, page);
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static void marvell_nfc_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct marvell_nfc_op *nfc_op)
+{
+ const struct nand_op_instr *instr = NULL;
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ bool first_cmd = true;
+ unsigned int op_id;
+ int i;
+
+ /* Reset the input structure as most of its fields will be OR'ed */
+ memset(nfc_op, 0, sizeof(struct marvell_nfc_op));
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+ int len = nand_subop_get_data_len(subop, op_id);
+
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (first_cmd)
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD1(instr->ctx.cmd.opcode);
+ else
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD2(instr->ctx.cmd.opcode) |
+ NDCB0_DBC;
+
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ first_cmd = false;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ nfc_op->ndcb[0] |= NDCB0_ADDR_CYC(naddrs);
+
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+ nfc_op->ndcb[1] |= addrs[i] << (8 * i);
+
+ if (naddrs >= 5)
+ nfc_op->ndcb[2] |= NDCB2_ADDR5_CYC(addrs[4]);
+ if (naddrs >= 6)
+ nfc_op->ndcb[3] |= NDCB3_ADDR6_CYC(addrs[5]);
+ if (naddrs == 7)
+ nfc_op->ndcb[3] |= NDCB3_ADDR7_CYC(addrs[6]);
+
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ);
+ if (nfc->caps->is_nfcv2) {
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
+ NDCB0_LEN_OVRD;
+ nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
+ }
+ nfc_op->data_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE);
+ if (nfc->caps->is_nfcv2) {
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
+ NDCB0_LEN_OVRD;
+ nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
+ }
+ nfc_op->data_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ nfc_op->rdy_delay_ns = instr->delay_ns;
+ break;
+ }
+ }
+}
+
+static int marvell_nfc_xfer_data_pio(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct marvell_nfc_op *nfc_op)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct nand_op_instr *instr = nfc_op->data_instr;
+ unsigned int op_id = nfc_op->data_instr_idx;
+ unsigned int len = nand_subop_get_data_len(subop, op_id);
+ unsigned int offset = nand_subop_get_data_start_off(subop, op_id);
+ bool reading = (instr->type == NAND_OP_DATA_IN_INSTR);
+ int ret;
+
+ if (instr->ctx.data.force_8bit)
+ marvell_nfc_force_byte_access(chip, true);
+
+ if (reading) {
+ u8 *in = instr->ctx.data.buf.in + offset;
+
+ ret = marvell_nfc_xfer_data_in_pio(nfc, in, len);
+ } else {
+ const u8 *out = instr->ctx.data.buf.out + offset;
+
+ ret = marvell_nfc_xfer_data_out_pio(nfc, out, len);
+ }
+
+ if (instr->ctx.data.force_8bit)
+ marvell_nfc_force_byte_access(chip, false);
+
+ return ret;
+}
+
+static int marvell_nfc_monolithic_access_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ bool reading;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ reading = (nfc_op.data_instr->type == NAND_OP_DATA_IN_INSTR);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
+ "RDDREQ/WRDREQ while draining raw data");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (reading) {
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+ }
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ if (!reading) {
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+ }
+
+ /*
+ * NDCR ND_RUN bit should be cleared automatically at the end of each
+ * operation but experience shows that the behavior is buggy when it
+ * comes to writes (with LEN_OVRD). Clear it by hand in this case.
+ */
+ if (!reading) {
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_naked_access_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+
+ /*
+ * Naked accesses are different in that they need to be flagged as
+ * naked by the controller. Reset the controller register fields that
+ * describe the operation type and refill them according to the
+ * ongoing operation.
+ */
+ nfc_op.ndcb[0] &= ~(NDCB0_CMD_TYPE(TYPE_MASK) |
+ NDCB0_CMD_XTYPE(XTYPE_MASK));
+ switch (subop->instrs[0].type) {
+ case NAND_OP_CMD_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_CMD);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_ADDR);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE) |
+ NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ break;
+ default:
+ /* This should never happen */
+ break;
+ }
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+
+ if (!nfc_op.data_instr) {
+ ret = marvell_nfc_wait_cmdd(chip);
+ cond_delay(nfc_op.cle_ale_delay_ns);
+ return ret;
+ }
+
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
+ "RDDREQ/WRDREQ while draining raw data");
+ if (ret)
+ return ret;
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ /*
+ * NDCR ND_RUN bit should be cleared automatically at the end of each
+ * operation but experience shows that the behavior is buggy when it
+ * comes to writes (with LEN_OVRD). Clear it by hand in this case.
+ */
+ if (subop->instrs[0].type == NAND_OP_DATA_OUT_INSTR) {
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_naked_waitrdy_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return ret;
+}
+
+static int marvell_nfc_read_id_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ_ID);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while reading ID");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_read_status_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_STATUS);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while reading status");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_reset_cmd_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_RESET);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_erase_cmd_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_ERASE);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return 0;
+}
+
+static const struct nand_op_parser marvell_nfcv2_op_parser = NAND_OP_PARSER(
+ /* Monolithic reads/writes */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_monolithic_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC_NFCV2),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_monolithic_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ /* Naked commands */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_waitrdy_exec,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
+
+static const struct nand_op_parser marvell_nfcv1_op_parser = NAND_OP_PARSER(
+ /* Naked commands not supported, use a function for each pattern */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_erase_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_read_status_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_reset_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_waitrdy_exec,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
+
+static int marvell_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ if (nfc->caps->is_nfcv2)
+ return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser,
+ op, check_only);
+ else
+ return nand_op_parser_exec_op(chip, &marvell_nfcv1_op_parser,
+ op, check_only);
+}
+
+/*
+ * Layouts were broken in the old pxa3xx_nand driver; these are supposed
+ * to be usable.
+ */
+static int marvell_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = (lt->full_chunk_cnt * lt->ecc_bytes) +
+ lt->last_ecc_bytes;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+
+ return 0;
+}
+
+static int marvell_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+
+ if (section)
+ return -ERANGE;
+
+ /*
+ * The bootrom looks for bad block markers in bytes 0 and 5 for the
+ * 4KB page / 4-bit BCH combination.
+ */
+ if (mtd->writesize == SZ_4K && lt->data_bytes == SZ_2K)
+ oobregion->offset = 6;
+ else
+ oobregion->offset = 2;
+
+ oobregion->length = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops marvell_nand_ooblayout_ops = {
+ .ecc = marvell_nand_ooblayout_ecc,
+ .free = marvell_nand_ooblayout_free,
+};
+
+static int marvell_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *l;
+ int i;
+
+ if (!nfc->caps->is_nfcv2 &&
+ (mtd->writesize + mtd->oobsize > MAX_CHUNK_SIZE)) {
+ dev_err(nfc->dev,
+ "NFCv1: writesize (%d) cannot be bigger than a chunk (%d)\n",
+ mtd->writesize, MAX_CHUNK_SIZE - mtd->oobsize);
+ return -ENOTSUPP;
+ }
+
+ to_marvell_nand(chip)->layout = NULL;
+ for (i = 0; i < ARRAY_SIZE(marvell_nfc_layouts); i++) {
+ l = &marvell_nfc_layouts[i];
+ if (mtd->writesize == l->writesize &&
+ ecc->size == l->chunk && ecc->strength == l->strength) {
+ to_marvell_nand(chip)->layout = l;
+ break;
+ }
+ }
+
+ if (!to_marvell_nand(chip)->layout ||
+ (!nfc->caps->is_nfcv2 && ecc->strength > 1)) {
+ dev_err(nfc->dev,
+ "ECC strength %d at page size %d is not supported\n",
+ ecc->strength, mtd->writesize);
+ return -ENOTSUPP;
+ }
+
+ mtd_set_ooblayout(mtd, &marvell_nand_ooblayout_ops);
+ ecc->steps = l->nchunks;
+ ecc->size = l->data_bytes;
+
+ if (ecc->strength == 1) {
+ chip->ecc.algo = NAND_ECC_HAMMING;
+ ecc->read_page_raw = marvell_nfc_hw_ecc_hmg_read_page_raw;
+ ecc->read_page = marvell_nfc_hw_ecc_hmg_read_page;
+ ecc->read_oob_raw = marvell_nfc_hw_ecc_hmg_read_oob_raw;
+ ecc->read_oob = ecc->read_oob_raw;
+ ecc->write_page_raw = marvell_nfc_hw_ecc_hmg_write_page_raw;
+ ecc->write_page = marvell_nfc_hw_ecc_hmg_write_page;
+ ecc->write_oob_raw = marvell_nfc_hw_ecc_hmg_write_oob_raw;
+ ecc->write_oob = ecc->write_oob_raw;
+ } else {
+ chip->ecc.algo = NAND_ECC_BCH;
+ ecc->strength = 16;
+ ecc->read_page_raw = marvell_nfc_hw_ecc_bch_read_page_raw;
+ ecc->read_page = marvell_nfc_hw_ecc_bch_read_page;
+ ecc->read_oob_raw = marvell_nfc_hw_ecc_bch_read_oob_raw;
+ ecc->read_oob = marvell_nfc_hw_ecc_bch_read_oob;
+ ecc->write_page_raw = marvell_nfc_hw_ecc_bch_write_page_raw;
+ ecc->write_page = marvell_nfc_hw_ecc_bch_write_page;
+ ecc->write_oob_raw = marvell_nfc_hw_ecc_bch_write_oob_raw;
+ ecc->write_oob = marvell_nfc_hw_ecc_bch_write_oob;
+ }
+
+ return 0;
+}
+
+static int marvell_nand_ecc_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ int ret;
+
+ if (ecc->mode != NAND_ECC_NONE && (!ecc->size || !ecc->strength)) {
+ if (chip->ecc_step_ds && chip->ecc_strength_ds) {
+ ecc->size = chip->ecc_step_ds;
+ ecc->strength = chip->ecc_strength_ds;
+ } else {
+ dev_info(nfc->dev,
+ "No minimum ECC strength, using 1b/512B\n");
+ ecc->size = 512;
+ ecc->strength = 1;
+ }
+ }
+
+ switch (ecc->mode) {
+ case NAND_ECC_HW:
+ ret = marvell_nand_hw_ecc_ctrl_init(mtd, ecc);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_NONE:
+ case NAND_ECC_SOFT:
+ if (!nfc->caps->is_nfcv2 && mtd->writesize != SZ_512 &&
+ mtd->writesize != SZ_2K) {
+ dev_err(nfc->dev, "NFCv1 cannot write %d bytes pages\n",
+ mtd->writesize);
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
+static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_mirror_pattern
+};
+
+static int marvell_nfc_setup_data_interface(struct mtd_info *mtd, int chipnr,
+ const struct nand_data_interface
+ *conf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ unsigned int period_ns = 1000000000 / clk_get_rate(nfc->ecc_clk) * 2;
+ const struct nand_sdr_timings *sdr;
+ struct marvell_nfc_timings nfc_tmg;
+ int read_delay;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ /*
+ * SDR timings are given in picoseconds while NFC timings must be
+ * expressed in NAND controller clock cycles; the controller clock
+ * runs at half the frequency of the ECC clock retrieved by
+ * clk_get_rate(). This is not written anywhere in the datasheet but
+ * was observed with an oscilloscope.
+ *
+ * The NFC datasheet gives equations from which these calculations
+ * are derived; they tend to be slightly more restrictive than the
+ * given core timings and may improve the overall speed.
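+ *
+ * As an illustration, assuming TO_CYCLES() rounds a picosecond
+ * duration up to whole controller clock cycles: with a 250 MHz ECC
+ * clock, period_ns = 1000000000 / 250000000 * 2 = 8 ns, so a
+ * tRC_min of 30000 ps gives tRP = TO_CYCLES(15000, 8) - 1 = 1.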
+ */
+ nfc_tmg.tRP = TO_CYCLES(DIV_ROUND_UP(sdr->tRC_min, 2), period_ns) - 1;
+ nfc_tmg.tRH = nfc_tmg.tRP;
+ nfc_tmg.tWP = TO_CYCLES(DIV_ROUND_UP(sdr->tWC_min, 2), period_ns) - 1;
+ nfc_tmg.tWH = nfc_tmg.tWP;
+ nfc_tmg.tCS = TO_CYCLES(sdr->tCS_min, period_ns);
+ nfc_tmg.tCH = TO_CYCLES(sdr->tCH_min, period_ns) - 1;
+ nfc_tmg.tADL = TO_CYCLES(sdr->tADL_min, period_ns);
+ /*
+ * Read delay is the time of propagation from SoC pins to NFC internal
+ * logic. With non-EDO timings, this is MIN_RD_DEL_CNT clock cycles. In
+ * EDO mode, an additional delay of tRH must be taken into account so
+ * the data is sampled on the falling edge instead of the rising edge.
+ */
+ read_delay = sdr->tRC_min >= 30000 ?
+ MIN_RD_DEL_CNT : MIN_RD_DEL_CNT + nfc_tmg.tRH;
+
+ nfc_tmg.tAR = TO_CYCLES(sdr->tAR_min, period_ns);
+ /*
+ * tWHR and tRHW are supposed to be read-to-write delays (and vice
+ * versa) but in some cases, e.g. when doing a change column, they
+ * must be greater than that to make sure the tCCS delay is
+ * respected.
+ */
+ nfc_tmg.tWHR = TO_CYCLES(max_t(int, sdr->tWHR_min, sdr->tCCS_min),
+ period_ns) - 2;
+ nfc_tmg.tRHW = TO_CYCLES(max_t(int, sdr->tRHW_min, sdr->tCCS_min),
+ period_ns);
+
+ /* Use WAIT_MODE (wait for RB line) instead of only relying on delays */
+ nfc_tmg.tR = TO_CYCLES(sdr->tWB_max, period_ns);
+
+ if (chipnr < 0)
+ return 0;
+
+ marvell_nand->ndtr0 =
+ NDTR0_TRP(nfc_tmg.tRP) |
+ NDTR0_TRH(nfc_tmg.tRH) |
+ NDTR0_ETRP(nfc_tmg.tRP) |
+ NDTR0_TWP(nfc_tmg.tWP) |
+ NDTR0_TWH(nfc_tmg.tWH) |
+ NDTR0_TCS(nfc_tmg.tCS) |
+ NDTR0_TCH(nfc_tmg.tCH) |
+ NDTR0_RD_CNT_DEL(read_delay) |
+ NDTR0_SELCNTR |
+ NDTR0_TADL(nfc_tmg.tADL);
+
+ marvell_nand->ndtr1 =
+ NDTR1_TAR(nfc_tmg.tAR) |
+ NDTR1_TWHR(nfc_tmg.tWHR) |
+ NDTR1_TRHW(nfc_tmg.tRHW) |
+ NDTR1_WAIT_MODE |
+ NDTR1_TR(nfc_tmg.tR);
+
+ return 0;
+}
+
+static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
+ struct device_node *np)
+{
+ struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(dev);
+ struct marvell_nand_chip *marvell_nand;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int nsels, ret, i;
+ u32 cs, rb;
+
+ /*
+ * The legacy "num-cs" property indicates the number of CS on the only
+ * chip connected to the controller (legacy bindings do not support
+ * more than one chip). CS lines are only incremented one by one while
+ * the RB pin is always #0.
+ *
+ * When not using legacy bindings, a pair of "reg" and "nand-rb"
+ * properties must be filled in. For each chip, expressed as a subnode,
+ * "reg" points to the CS lines and "nand-rb" to the RB line.
+ */
+ if (pdata) {
+ nsels = 1;
+ } else if (nfc->caps->legacy_of_bindings &&
+ !of_get_property(np, "num-cs", &nsels)) {
+ dev_err(dev, "missing num-cs property\n");
+ return -EINVAL;
+ } else if (!of_get_property(np, "reg", &nsels)) {
+ dev_err(dev, "missing reg property\n");
+ return -EINVAL;
+ }
+
+ if (!pdata)
+ nsels /= sizeof(u32);
+ if (!nsels) {
+ dev_err(dev, "invalid reg property size\n");
+ return -EINVAL;
+ }
+
+ /* Alloc the nand chip structure */
+ marvell_nand = devm_kzalloc(dev, sizeof(*marvell_nand) +
+ (nsels *
+ sizeof(struct marvell_nand_chip_sel)),
+ GFP_KERNEL);
+ if (!marvell_nand) {
+ dev_err(dev, "could not allocate chip structure\n");
+ return -ENOMEM;
+ }
+
+ marvell_nand->nsels = nsels;
+ marvell_nand->selected_die = -1;
+
+ for (i = 0; i < nsels; i++) {
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /*
+ * Legacy bindings use the CS lines in natural
+ * order (0, 1, ...)
+ */
+ cs = i;
+ } else {
+ /* Retrieve CS id */
+ ret = of_property_read_u32_index(np, "reg", i, &cs);
+ if (ret) {
+ dev_err(dev, "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (cs >= nfc->caps->max_cs_nb) {
+ dev_err(dev, "invalid reg value: %u (max CS = %d)\n",
+ cs, nfc->caps->max_cs_nb);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &nfc->assigned_cs)) {
+ dev_err(dev, "CS %d already assigned\n", cs);
+ return -EINVAL;
+ }
+
+ /*
+ * The cs variable represents the chip select id, which must be
+ * converted into bit fields for NDCB0 and NDCB2 to select the
+ * right chip. Unfortunately, due to a lack of information on
+ * the subject and inconsistent documentation, the user should not
+ * use CS1 and CS3 at all as asserting them is not supported in
+ * a reliable way (due to multiplexing inside the ADDR5 field).
+ */
+ marvell_nand->sels[i].cs = cs;
+ switch (cs) {
+ case 0:
+ case 2:
+ marvell_nand->sels[i].ndcb0_csel = 0;
+ break;
+ case 1:
+ case 3:
+ marvell_nand->sels[i].ndcb0_csel = NDCB0_CSEL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Retrieve RB id */
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /* Legacy bindings always use RB #0 */
+ rb = 0;
+ } else {
+ ret = of_property_read_u32_index(np, "nand-rb", i,
+ &rb);
+ if (ret) {
+ dev_err(dev,
+ "could not retrieve RB property: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (rb >= nfc->caps->max_rb_nb) {
+ dev_err(dev, "invalid reg value: %u (max RB = %d)\n",
+ rb, nfc->caps->max_rb_nb);
+ return -EINVAL;
+ }
+
+ marvell_nand->sels[i].rb = rb;
+ }
+
+ chip = &marvell_nand->chip;
+ chip->controller = &nfc->controller;
+ nand_set_flash_node(chip, np);
+
+ chip->exec_op = marvell_nfc_exec_op;
+ chip->select_chip = marvell_nfc_select_chip;
+ if (nfc->caps->is_nfcv2 &&
+ !of_property_read_bool(np, "marvell,nand-keep-config"))
+ chip->setup_data_interface = marvell_nfc_setup_data_interface;
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = dev;
+
+ /*
+ * Default to HW ECC engine mode. If the nand-ecc-mode property is given
+ * in the DT node, this entry will be overwritten in nand_scan_ident().
+ */
+ chip->ecc.mode = NAND_ECC_HW;
+
+ /*
+ * Save a reference value for timing registers before
+ * ->setup_data_interface() is called.
+ */
+ marvell_nand->ndtr0 = readl_relaxed(nfc->regs + NDTR0);
+ marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1);
+
+ chip->options |= NAND_BUSWIDTH_AUTO;
+ ret = nand_scan_ident(mtd, marvell_nand->nsels, NULL);
+ if (ret) {
+ dev_err(dev, "could not identify the nand chip\n");
+ return ret;
+ }
+
+ if (pdata && pdata->flash_bbt)
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ /*
+ * We'll use a bad block table stored in-flash and don't
+ * allow writing the bad block marker to the flash.
+ */
+ chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ }
+
+ /* Save the chip-specific fields of NDCR */
+ marvell_nand->ndcr = NDCR_PAGE_SZ(mtd->writesize);
+ if (chip->options & NAND_BUSWIDTH_16)
+ marvell_nand->ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
+
+ /*
+ * On small page NANDs, only one cycle is needed to pass the
+ * column address.
+ */
+ if (mtd->writesize <= 512) {
+ marvell_nand->addr_cyc = 1;
+ } else {
+ marvell_nand->addr_cyc = 2;
+ marvell_nand->ndcr |= NDCR_RA_START;
+ }
+
+ /*
+ * Now add the number of cycles needed to pass the row
+ * address.
+ *
+ * Addressing a chip using CS 2 or 3 should also need the third row
+ * cycle but due to inconsistencies in the documentation and lack of
+ * hardware to test this situation, this case is not supported.
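+ *
+ * For example, a typical large page chip needing three row cycles
+ * ends up with addr_cyc = 2 (column) + 3 (row) = 5.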
+ */
+ if (chip->options & NAND_ROW_ADDR_3)
+ marvell_nand->addr_cyc += 3;
+ else
+ marvell_nand->addr_cyc += 2;
+
+ if (pdata) {
+ chip->ecc.size = pdata->ecc_step_size;
+ chip->ecc.strength = pdata->ecc_strength;
+ }
+
+ ret = marvell_nand_ecc_init(mtd, &chip->ecc);
+ if (ret) {
+ dev_err(dev, "ECC init failed: %d\n", ret);
+ return ret;
+ }
+
+ if (chip->ecc.mode == NAND_ECC_HW) {
+ /*
+ * Subpage write is not available with hardware ECC; also prohibit
+ * subpage read, as subpage access would otherwise still be allowed
+ * from userspace and subpage writes, if used, would lead to
+ * numerous uncorrectable ECC errors.
+ */
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ }
+
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /*
+ * We keep the MTD name unchanged to avoid breaking platforms
+ * where the MTD cmdline parser is used and the bootloader
+ * has not been updated to use the new naming scheme.
+ */
+ mtd->name = "pxa3xx_nand-0";
+ } else if (!mtd->name) {
+ /*
+ * If the new bindings are used and the bootloader has not been
+ * updated to pass a new mtdparts parameter on the cmdline, you
+ * should define the following property in your NAND node, e.g.:
+ *
+ * label = "main-storage";
+ *
+ * This way, mtd->name will be set by the core when
+ * nand_set_flash_node() is called.
+ */
+ mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
+ "%s:nand.%d", dev_name(nfc->dev),
+ marvell_nand->sels[0].cs);
+ if (!mtd->name) {
+ dev_err(nfc->dev, "Failed to allocate mtd->name\n");
+ return -ENOMEM;
+ }
+ }
+
+ ret = nand_scan_tail(mtd);
+ if (ret) {
+ dev_err(dev, "nand_scan_tail failed: %d\n", ret);
+ return ret;
+ }
+
+ if (pdata)
+ /* Legacy bindings support only one chip */
+ ret = mtd_device_register(mtd, pdata->parts[0],
+ pdata->nr_parts[0]);
+ else
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to register mtd device: %d\n", ret);
+ nand_release(mtd);
+ return ret;
+ }
+
+ list_add_tail(&marvell_nand->node, &nfc->chips);
+
+ return 0;
+}
+
+static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int max_cs = nfc->caps->max_cs_nb;
+ int nchips;
+ int ret;
+
+ if (!np)
+ nchips = 1;
+ else
+ nchips = of_get_child_count(np);
+
+ if (nchips > max_cs) {
+ dev_err(dev, "too many NAND chips: %d (max = %d CS)\n", nchips,
+ max_cs);
+ return -EINVAL;
+ }
+
+ /*
+ * Legacy bindings do not use child nodes to describe NAND chip
+ * properties and layout. Instead, NAND properties are mixed with the
+ * controller ones, and partitions are defined as direct subnodes of the
+ * NAND controller node.
+ */
+ if (nfc->caps->legacy_of_bindings) {
+ ret = marvell_nand_chip_init(dev, nfc, np);
+ return ret;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = marvell_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
+{
+ struct marvell_nand_chip *entry, *temp;
+
+ list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
+ nand_release(nand_to_mtd(&entry->chip));
+ list_del(&entry->node);
+ }
+}
+
+static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
+{
+ struct platform_device *pdev = container_of(nfc->dev,
+ struct platform_device,
+ dev);
+ struct dma_slave_config config = {};
+ struct resource *r;
+ dma_cap_mask_t mask;
+ struct pxad_param param;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_PXA_DMA)) {
+ dev_warn(nfc->dev,
+ "DMA not enabled in configuration\n");
+ return -ENOTSUPP;
+ }
+
+ ret = dma_set_mask_and_coherent(nfc->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!r) {
+ dev_err(nfc->dev, "No resource defined for data DMA\n");
+ return -ENXIO;
+ }
+
+ param.drcmr = r->start;
+ param.prio = PXAD_PRIO_LOWEST;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ nfc->dma_chan =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &param, nfc->dev,
+ "data");
+ if (!nfc->dma_chan) {
+ dev_err(nfc->dev,
+ "Unable to request data DMA channel\n");
+ return -ENODEV;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -ENXIO;
+
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.src_addr = r->start + NDDB;
+ config.dst_addr = r->start + NDDB;
+ config.src_maxburst = 32;
+ config.dst_maxburst = 32;
+ ret = dmaengine_slave_config(nfc->dma_chan, &config);
+ if (ret < 0) {
+ dev_err(nfc->dev, "Failed to configure DMA channel\n");
+ return ret;
+ }
+
+ /*
+ * DMA transfers must have a length that is a multiple of 32 bytes,
+ * and this length may be bigger than the destination buffer. Use
+ * this bounce buffer for DMA transfers instead and then copy the
+ * desired amount of data to the provided buffer.
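+ *
+ * For instance, a raw transfer of 2110 (2048 + 62) bytes would be
+ * rounded up to 2112 bytes and would overflow a tightly sized
+ * destination buffer.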
+ */
+ nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA);
+ if (!nfc->dma_buf)
+ return -ENOMEM;
+
+ nfc->use_dma = true;
+
+ return 0;
+}
+
+static int marvell_nfc_init(struct marvell_nfc *nfc)
+{
+ struct device_node *np = nfc->dev->of_node;
+
+ /*
+ * Some SoCs like A7k/A8k need the NAND controller, gated clocks and
+ * reset bits to be enabled manually to avoid being bootloader
+ * dependent. This is done through the System Functions registers.
+ */
+ if (nfc->caps->need_system_controller) {
+ struct regmap *sysctrl_base =
+ syscon_regmap_lookup_by_phandle(np,
+ "marvell,system-controller");
+ u32 reg;
+
+ if (IS_ERR(sysctrl_base))
+ return PTR_ERR(sysctrl_base);
+
+ reg = GENCONF_SOC_DEVICE_MUX_NFC_EN |
+ GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST |
+ GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST |
+ GENCONF_SOC_DEVICE_MUX_NFC_INT_EN;
+ regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
+
+ regmap_read(sysctrl_base, GENCONF_CLK_GATING_CTRL, &reg);
+ reg |= GENCONF_CLK_GATING_CTRL_ND_GATE;
+ regmap_write(sysctrl_base, GENCONF_CLK_GATING_CTRL, reg);
+
+ regmap_read(sysctrl_base, GENCONF_ND_CLK_CTRL, &reg);
+ reg |= GENCONF_ND_CLK_CTRL_EN;
+ regmap_write(sysctrl_base, GENCONF_ND_CLK_CTRL, reg);
+ }
+
+ /* Configure the DMA if appropriate */
+ if (!nfc->caps->is_nfcv2)
+ marvell_nfc_init_dma(nfc);
+
+ /*
+ * ECC operations and interrupts are only enabled when specifically
+ * needed. ECC shall not be activated in the early stages or probe
+ * fails. The arbiter flag, even if marked as "reserved", must be set
+ * (empirically determined). The SPARE_EN bit must always be set or
+ * the ECC bytes will not be at the same offset in the read page,
+ * which would break the protection.
+ */
+ writel_relaxed(NDCR_ALL_INT | NDCR_ND_ARB_EN | NDCR_SPARE_EN |
+ NDCR_RD_ID_CNT(NFCV1_READID_LEN), nfc->regs + NDCR);
+ writel_relaxed(0xFFFFFFFF, nfc->regs + NDSR);
+ writel_relaxed(0, nfc->regs + NDECCCTRL);
+
+ return 0;
+}
+
+static int marvell_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *r;
+ struct marvell_nfc *nfc;
+ int ret;
+ int irq;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(struct marvell_nfc),
+ GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = dev;
+ nand_hw_control_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->regs = devm_ioremap_resource(dev, r);
+ if (IS_ERR(nfc->regs))
+ return PTR_ERR(nfc->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "failed to retrieve irq\n");
+ return irq;
+ }
+
+ nfc->ecc_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(nfc->ecc_clk))
+ return PTR_ERR(nfc->ecc_clk);
+
+ ret = clk_prepare_enable(nfc->ecc_clk);
+ if (ret)
+ return ret;
+
+ marvell_nfc_disable_int(nfc, NDCR_ALL_INT);
+ marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
+ ret = devm_request_irq(dev, irq, marvell_nfc_isr,
+ 0, "marvell-nfc", nfc);
+ if (ret)
+ goto unprepare_clk;
+
+ /* Get NAND controller capabilities */
+ if (pdev->id_entry)
+ nfc->caps = (void *)pdev->id_entry->driver_data;
+ else
+ nfc->caps = of_device_get_match_data(&pdev->dev);
+
+ if (!nfc->caps) {
+ dev_err(dev, "Could not retrieve NFC caps\n");
+ ret = -EINVAL;
+ goto unprepare_clk;
+ }
+
+ /* Init the controller and then probe the chips */
+ ret = marvell_nfc_init(nfc);
+ if (ret)
+ goto unprepare_clk;
+
+ platform_set_drvdata(pdev, nfc);
+
+ ret = marvell_nand_chips_init(dev, nfc);
+ if (ret)
+ goto unprepare_clk;
+
+ return 0;
+
+unprepare_clk:
+ clk_disable_unprepare(nfc->ecc_clk);
+
+ return ret;
+}
+
+static int marvell_nfc_remove(struct platform_device *pdev)
+{
+ struct marvell_nfc *nfc = platform_get_drvdata(pdev);
+
+ marvell_nand_chips_cleanup(nfc);
+
+ if (nfc->use_dma) {
+ dmaengine_terminate_all(nfc->dma_chan);
+ dma_release_channel(nfc->dma_chan);
+ }
+
+ clk_disable_unprepare(nfc->ecc_clk);
+
+ return 0;
+}
+
+static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .need_system_controller = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada370_nfc_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_pxa3xx_nfc_caps = {
+ .max_cs_nb = 2,
+ .max_rb_nb = 1,
+ .use_dma = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada_8k_nfc_legacy_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .need_system_controller = true,
+ .legacy_of_bindings = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada370_nfc_legacy_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .legacy_of_bindings = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_pxa3xx_nfc_legacy_caps = {
+ .max_cs_nb = 2,
+ .max_rb_nb = 1,
+ .legacy_of_bindings = true,
+ .use_dma = true,
+};
+
+static const struct platform_device_id marvell_nfc_platform_ids[] = {
+ {
+ .name = "pxa3xx-nand",
+ .driver_data = (kernel_ulong_t)&marvell_pxa3xx_nfc_legacy_caps,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, marvell_nfc_platform_ids);
+
+static const struct of_device_id marvell_nfc_of_ids[] = {
+ {
+ .compatible = "marvell,armada-8k-nand-controller",
+ .data = &marvell_armada_8k_nfc_caps,
+ },
+ {
+ .compatible = "marvell,armada370-nand-controller",
+ .data = &marvell_armada370_nfc_caps,
+ },
+ {
+ .compatible = "marvell,pxa3xx-nand-controller",
+ .data = &marvell_pxa3xx_nfc_caps,
+ },
+ /* Support for old/deprecated bindings: */
+ {
+ .compatible = "marvell,armada-8k-nand",
+ .data = &marvell_armada_8k_nfc_legacy_caps,
+ },
+ {
+ .compatible = "marvell,armada370-nand",
+ .data = &marvell_armada370_nfc_legacy_caps,
+ },
+ {
+ .compatible = "marvell,pxa3xx-nand",
+ .data = &marvell_pxa3xx_nfc_legacy_caps,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, marvell_nfc_of_ids);
+
+static struct platform_driver marvell_nfc_driver = {
+ .driver = {
+ .name = "marvell-nfc",
+ .of_match_table = marvell_nfc_of_ids,
+ },
+ .id_table = marvell_nfc_platform_ids,
+ .probe = marvell_nfc_probe,
+ .remove = marvell_nfc_remove,
+};
+module_platform_driver(marvell_nfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Marvell NAND controller driver");
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index c51d214..40d86a8 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -34,34 +34,28 @@
#define ECC_ENCCON (0x00)
#define ECC_ENCCNFG (0x04)
-#define ECC_MODE_SHIFT (5)
#define ECC_MS_SHIFT (16)
#define ECC_ENCDIADDR (0x08)
#define ECC_ENCIDLE (0x0C)
-#define ECC_ENCIRQ_EN (0x80)
-#define ECC_ENCIRQ_STA (0x84)
#define ECC_DECCON (0x100)
#define ECC_DECCNFG (0x104)
#define DEC_EMPTY_EN BIT(31)
#define DEC_CNFG_CORRECT (0x3 << 12)
#define ECC_DECIDLE (0x10C)
#define ECC_DECENUM0 (0x114)
-#define ECC_DECDONE (0x124)
-#define ECC_DECIRQ_EN (0x200)
-#define ECC_DECIRQ_STA (0x204)
#define ECC_TIMEOUT (500000)
#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
-#define ECC_IRQ_REG(op) ((op) == ECC_ENCODE ? \
- ECC_ENCIRQ_EN : ECC_DECIRQ_EN)
struct mtk_ecc_caps {
u32 err_mask;
const u8 *ecc_strength;
+ const u32 *ecc_regs;
u8 num_ecc_strength;
- u32 encode_parity_reg0;
+ u8 ecc_mode_shift;
+ u32 parity_bits;
int pg_irq_sel;
};
@@ -89,6 +83,46 @@ static const u8 ecc_strength_mt2712[] = {
40, 44, 48, 52, 56, 60, 68, 72, 80
};
+static const u8 ecc_strength_mt7622[] = {
+ 4, 6, 8, 10, 12, 14, 16
+};
+
+enum mtk_ecc_regs {
+ ECC_ENCPAR00,
+ ECC_ENCIRQ_EN,
+ ECC_ENCIRQ_STA,
+ ECC_DECDONE,
+ ECC_DECIRQ_EN,
+ ECC_DECIRQ_STA,
+};
+
+static u32 mt2701_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x10,
+ [ECC_ENCIRQ_EN] = 0x80,
+ [ECC_ENCIRQ_STA] = 0x84,
+ [ECC_DECDONE] = 0x124,
+ [ECC_DECIRQ_EN] = 0x200,
+ [ECC_DECIRQ_STA] = 0x204,
+};
+
+static u32 mt2712_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x300,
+ [ECC_ENCIRQ_EN] = 0x80,
+ [ECC_ENCIRQ_STA] = 0x84,
+ [ECC_DECDONE] = 0x124,
+ [ECC_DECIRQ_EN] = 0x200,
+ [ECC_DECIRQ_STA] = 0x204,
+};
+
+static u32 mt7622_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x10,
+ [ECC_ENCIRQ_EN] = 0x30,
+ [ECC_ENCIRQ_STA] = 0x34,
+ [ECC_DECDONE] = 0x11c,
+ [ECC_DECIRQ_EN] = 0x140,
+ [ECC_DECIRQ_STA] = 0x144,
+};
+
static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
enum mtk_ecc_operation op)
{
@@ -107,32 +141,30 @@ static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
static irqreturn_t mtk_ecc_irq(int irq, void *id)
{
struct mtk_ecc *ecc = id;
- enum mtk_ecc_operation op;
u32 dec, enc;
- dec = readw(ecc->regs + ECC_DECIRQ_STA) & ECC_IRQ_EN;
+ dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA])
+ & ECC_IRQ_EN;
if (dec) {
- op = ECC_DECODE;
- dec = readw(ecc->regs + ECC_DECDONE);
+ dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
if (dec & ecc->sectors) {
/*
* Clear decode IRQ status once again to ensure that
* there will be no extra IRQ.
*/
- readw(ecc->regs + ECC_DECIRQ_STA);
+ readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]);
ecc->sectors = 0;
complete(&ecc->done);
} else {
return IRQ_HANDLED;
}
} else {
- enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ECC_IRQ_EN;
- if (enc) {
- op = ECC_ENCODE;
+ enc = readl(ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_STA])
+ & ECC_IRQ_EN;
+ if (enc)
complete(&ecc->done);
- } else {
+ else
return IRQ_NONE;
- }
}
return IRQ_HANDLED;
@@ -160,7 +192,7 @@ static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
/* configure ECC encoder (in bits) */
enc_sz = config->len << 3;
- reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+ reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
reg |= (enc_sz << ECC_MS_SHIFT);
writel(reg, ecc->regs + ECC_ENCCNFG);
@@ -171,9 +203,9 @@ static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
} else {
/* configure ECC decoder (in bits) */
dec_sz = (config->len << 3) +
- config->strength * ECC_PARITY_BITS;
+ config->strength * ecc->caps->parity_bits;
- reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+ reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
reg |= DEC_EMPTY_EN;
writel(reg, ecc->regs + ECC_DECCNFG);
@@ -291,7 +323,12 @@ int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
*/
if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE)
reg_val |= ECC_PG_IRQ_SEL;
- writew(reg_val, ecc->regs + ECC_IRQ_REG(op));
+ if (op == ECC_ENCODE)
+ writew(reg_val, ecc->regs +
+ ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
+ else
+ writew(reg_val, ecc->regs +
+ ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
}
writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
@@ -310,13 +347,17 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
/* disable it */
mtk_ecc_wait_idle(ecc, op);
- if (op == ECC_DECODE)
+ if (op == ECC_DECODE) {
/*
* Clear decode IRQ status in case waiting for the decode IRQ
* timed out.
*/
- readw(ecc->regs + ECC_DECIRQ_STA);
- writew(0, ecc->regs + ECC_IRQ_REG(op));
+ readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
+ writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
+ } else {
+ writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
+ }
+
writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
mutex_unlock(&ecc->lock);
@@ -367,11 +408,11 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
mtk_ecc_wait_idle(ecc, ECC_ENCODE);
/* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
- len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
+ len = (config->strength * ecc->caps->parity_bits + 7) >> 3;
/* write the parity bytes generated by the ECC back to temp buffer */
__ioread32_copy(ecc->eccdata,
- ecc->regs + ecc->caps->encode_parity_reg0,
+ ecc->regs + ecc->caps->ecc_regs[ECC_ENCPAR00],
round_up(len, 4));
/* copy into possibly unaligned OOB region with actual length */
@@ -404,22 +445,42 @@ void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p)
}
EXPORT_SYMBOL(mtk_ecc_adjust_strength);
+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc)
+{
+ return ecc->caps->parity_bits;
+}
+EXPORT_SYMBOL(mtk_ecc_get_parity_bits);
+
static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
.err_mask = 0x3f,
.ecc_strength = ecc_strength_mt2701,
+ .ecc_regs = mt2701_ecc_regs,
.num_ecc_strength = 20,
- .encode_parity_reg0 = 0x10,
+ .ecc_mode_shift = 5,
+ .parity_bits = 14,
.pg_irq_sel = 0,
};
static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
.err_mask = 0x7f,
.ecc_strength = ecc_strength_mt2712,
+ .ecc_regs = mt2712_ecc_regs,
.num_ecc_strength = 23,
- .encode_parity_reg0 = 0x300,
+ .ecc_mode_shift = 5,
+ .parity_bits = 14,
.pg_irq_sel = 1,
};
+static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = {
+ .err_mask = 0x3f,
+ .ecc_strength = ecc_strength_mt7622,
+ .ecc_regs = mt7622_ecc_regs,
+ .num_ecc_strength = 7,
+ .ecc_mode_shift = 4,
+ .parity_bits = 13,
+ .pg_irq_sel = 0,
+};
+
static const struct of_device_id mtk_ecc_dt_match[] = {
{
.compatible = "mediatek,mt2701-ecc",
@@ -427,6 +488,9 @@ static const struct of_device_id mtk_ecc_dt_match[] = {
}, {
.compatible = "mediatek,mt2712-ecc",
.data = &mtk_ecc_caps_mt2712,
+ }, {
+ .compatible = "mediatek,mt7622-ecc",
+ .data = &mtk_ecc_caps_mt7622,
},
{},
};
@@ -452,7 +516,7 @@ static int mtk_ecc_probe(struct platform_device *pdev)
max_eccdata_size = ecc->caps->num_ecc_strength - 1;
max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size];
- max_eccdata_size = (max_eccdata_size * ECC_PARITY_BITS + 7) >> 3;
+ max_eccdata_size = (max_eccdata_size * ecc->caps->parity_bits + 7) >> 3;
max_eccdata_size = round_up(max_eccdata_size, 4);
ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL);
if (!ecc->eccdata)
diff --git a/drivers/mtd/nand/mtk_ecc.h b/drivers/mtd/nand/mtk_ecc.h
index d245c14..a455df0 100644
--- a/drivers/mtd/nand/mtk_ecc.h
+++ b/drivers/mtd/nand/mtk_ecc.h
@@ -14,8 +14,6 @@
#include <linux/types.h>
-#define ECC_PARITY_BITS (14)
-
enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
@@ -43,6 +41,7 @@ int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
void mtk_ecc_disable(struct mtk_ecc *);
void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p);
+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc);
struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
void mtk_ecc_release(struct mtk_ecc *);
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
index d86a7d1..6977da3 100644
--- a/drivers/mtd/nand/mtk_nand.c
+++ b/drivers/mtd/nand/mtk_nand.c
@@ -97,7 +97,6 @@
#define MTK_TIMEOUT (500000)
#define MTK_RESET_TIMEOUT (1000000)
-#define MTK_MAX_SECTOR (16)
#define MTK_NAND_MAX_NSELS (2)
#define MTK_NFC_MIN_SPARE (16)
#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
@@ -109,6 +108,8 @@ struct mtk_nfc_caps {
u8 num_spare_size;
u8 pageformat_spare_shift;
u8 nfi_clk_div;
+ u8 max_sector;
+ u32 max_sector_size;
};
struct mtk_nfc_bad_mark_ctl {
@@ -173,6 +174,10 @@ static const u8 spare_size_mt2712[] = {
74
};
+static const u8 spare_size_mt7622[] = {
+ 16, 26, 27, 28
+};
+
static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
{
return container_of(nand, struct mtk_nfc_nand_chip, nand);
@@ -450,7 +455,7 @@ static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd)
* set to max sector to allow the HW to continue reading over
* unaligned accesses
*/
- reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD;
+ reg = (nfc->caps->max_sector << CON_SEC_SHIFT) | CON_BRD;
nfi_writel(nfc, reg, NFI_CON);
/* trigger to fetch data */
@@ -481,7 +486,7 @@ static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte)
reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
nfi_writew(nfc, reg, NFI_CNFG);
- reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR;
+ reg = nfc->caps->max_sector << CON_SEC_SHIFT | CON_BWR;
nfi_writel(nfc, reg, NFI_CON);
nfi_writew(nfc, STAR_EN, NFI_STRDATA);
@@ -761,6 +766,8 @@ static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
u32 reg;
int ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
if (!raw) {
/* OOB => FDM: from register, ECC: from HW */
reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
@@ -794,7 +801,10 @@ static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (!raw)
mtk_ecc_disable(nfc->ecc);
- return ret;
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
}
static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd,
@@ -832,18 +842,7 @@ static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- int ret;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-
- ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
- if (ret < 0)
- return -EIO;
-
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- ret = chip->waitfunc(mtd, chip);
-
- return ret & NAND_STATUS_FAIL ? -EIO : 0;
+ return mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
}
static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
@@ -892,8 +891,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
buf = bufpoi + start * chip->ecc.size;
- if (column != 0)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);
+ nand_read_page_op(chip, page, column, NULL, 0);
addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
rc = dma_mapping_error(nfc->dev, addr);
@@ -1016,8 +1014,6 @@ static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page);
}
@@ -1126,9 +1122,11 @@ static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
+ struct mtk_nfc *nfc = nand_get_controller_data(nand);
u32 ecc_bytes;
- ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);
+ ecc_bytes = DIV_ROUND_UP(nand->ecc.strength *
+ mtk_ecc_get_parity_bits(nfc->ecc), 8);
fdm->reg_size = chip->spare_per_sector - ecc_bytes;
if (fdm->reg_size > NFI_FDM_MAX_SIZE)
@@ -1208,7 +1206,8 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
* this controller only supports 512 and 1024 sizes
*/
if (nand->ecc.size < 1024) {
- if (mtd->writesize > 512) {
+ if (mtd->writesize > 512 &&
+ nfc->caps->max_sector_size > 512) {
nand->ecc.size = 1024;
nand->ecc.strength <<= 1;
} else {
@@ -1223,7 +1222,8 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
return ret;
/* calculate oob bytes except ecc parity data */
- free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3;
+ free = (nand->ecc.strength * mtk_ecc_get_parity_bits(nfc->ecc)
+ + 7) >> 3;
free = spare - free;
/*
@@ -1233,10 +1233,12 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
*/
if (free > NFI_FDM_MAX_SIZE) {
spare -= NFI_FDM_MAX_SIZE;
- nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
+ nand->ecc.strength = (spare << 3) /
+ mtk_ecc_get_parity_bits(nfc->ecc);
} else if (free < 0) {
spare -= NFI_FDM_MIN_SIZE;
- nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
+ nand->ecc.strength = (spare << 3) /
+ mtk_ecc_get_parity_bits(nfc->ecc);
}
}
@@ -1389,6 +1391,8 @@ static const struct mtk_nfc_caps mtk_nfc_caps_mt2701 = {
.num_spare_size = 16,
.pageformat_spare_shift = 4,
.nfi_clk_div = 1,
+ .max_sector = 16,
+ .max_sector_size = 1024,
};
static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = {
@@ -1396,6 +1400,17 @@ static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = {
.num_spare_size = 19,
.pageformat_spare_shift = 16,
.nfi_clk_div = 2,
+ .max_sector = 16,
+ .max_sector_size = 1024,
+};
+
+static const struct mtk_nfc_caps mtk_nfc_caps_mt7622 = {
+ .spare_size = spare_size_mt7622,
+ .num_spare_size = 4,
+ .pageformat_spare_shift = 4,
+ .nfi_clk_div = 1,
+ .max_sector = 8,
+ .max_sector_size = 512,
};
static const struct of_device_id mtk_nfc_id_table[] = {
@@ -1405,6 +1420,9 @@ static const struct of_device_id mtk_nfc_id_table[] = {
}, {
.compatible = "mediatek,mt2712-nfc",
.data = &mtk_nfc_caps_mt2712,
+ }, {
+ .compatible = "mediatek,mt7622-nfc",
+ .data = &mtk_nfc_caps_mt7622,
},
{}
};
@@ -1540,7 +1558,6 @@ static int mtk_nfc_resume(struct device *dev)
struct mtk_nfc *nfc = dev_get_drvdata(dev);
struct mtk_nfc_nand_chip *chip;
struct nand_chip *nand;
- struct mtd_info *mtd;
int ret;
u32 i;
@@ -1553,11 +1570,8 @@ static int mtk_nfc_resume(struct device *dev)
/* reset NAND chip if VCC was powered off */
list_for_each_entry(chip, &nfc->chips, node) {
nand = &chip->nand;
- mtd = nand_to_mtd(nand);
- for (i = 0; i < chip->nsels; i++) {
- nand->select_chip(mtd, i);
- nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- }
+ for (i = 0; i < chip->nsels; i++)
+ nand_reset(nand, i);
}
return 0;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 6135d00..e70ca16 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -561,14 +561,19 @@ static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
static int nand_check_wp(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ u8 status;
+ int ret;
/* Broken xD cards report WP despite being writable */
if (chip->options & NAND_BROKEN_XD)
return 0;
/* Check the WP bit */
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ return status & NAND_STATUS_WP ? 0 : 1;
}
/**
@@ -667,16 +672,83 @@ EXPORT_SYMBOL_GPL(nand_wait_ready);
static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
{
register struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret;
timeo = jiffies + msecs_to_jiffies(timeo);
do {
- if ((chip->read_byte(mtd) & NAND_STATUS_READY))
+ u8 status;
+
+ ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ if (ret)
+ return;
+
+ if (status & NAND_STATUS_READY)
break;
touch_softlockup_watchdog();
} while (time_before(jiffies, timeo));
};
/**
+ * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
+ * @chip: NAND chip structure
+ * @timeout_ms: Timeout in ms
+ *
+ * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
+ * If that does not happen within the specified timeout, -ETIMEDOUT is
+ * returned.
+ *
+ * This helper is intended to be used when the controller does not have access
+ * to the NAND R/B pin.
+ *
+ * Be aware that calling this helper from an ->exec_op() implementation means
+ * ->exec_op() must be re-entrant.
+ *
+ * Return 0 if the NAND chip is ready, a negative error otherwise.
+ */
+int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
+{
+ u8 status = 0;
+ int ret;
+
+ if (!chip->exec_op)
+ return -ENOTSUPP;
+
+ ret = nand_status_op(chip, NULL);
+ if (ret)
+ return ret;
+
+ timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ if (ret)
+ break;
+
+ if (status & NAND_STATUS_READY)
+ break;
+
+ /*
+ * The typical lowest execution time for a tR on most NANDs is 10us;
+ * use this as the polling delay before doing something smarter (e.g.
+ * deriving a delay from the timeout value, timeout_ms/ratio).
+ */
+ udelay(10);
+ } while (time_before(jiffies, timeout_ms));
+
+ /*
+ * We have to exit READ_STATUS mode in order to read real data on the
+ * bus in case the WAITRDY instruction is preceding a DATA_IN
+ * instruction.
+ */
+ nand_exit_status_op(chip);
+
+ if (ret)
+ return ret;
+
+ return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
+};
+EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
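+
+/*
+ * A minimal usage sketch (illustrative only): a controller driver without
+ * access to the R/B pin could handle a WAITRDY instruction from its
+ * ->exec_op() hook with:
+ *
+ *	case NAND_OP_WAITRDY_INSTR:
+ *		ret = nand_soft_waitrdy(chip,
+ *					instr->ctx.waitrdy.timeout_ms);
+ *		break;
+ */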
+
+/**
* nand_command - [DEFAULT] Send command to NAND device
* @mtd: MTD device structure
* @command: the command to be sent
@@ -710,7 +782,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, readcmd, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
}
- chip->cmd_ctrl(mtd, command, ctrl);
+ if (command != NAND_CMD_NONE)
+ chip->cmd_ctrl(mtd, command, ctrl);
/* Address cycle, when necessary */
ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
@@ -738,6 +811,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
*/
switch (command) {
+ case NAND_CMD_NONE:
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
@@ -802,8 +876,8 @@ static void nand_ccs_delay(struct nand_chip *chip)
* Wait tCCS_min if it is correctly defined, otherwise wait 500ns
* (which should be safe for all NANDs).
*/
- if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min)
- ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000);
+ if (chip->setup_data_interface)
+ ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000);
else
ndelay(500);
}
@@ -831,7 +905,9 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
}
/* Command latch cycle */
- chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ if (command != NAND_CMD_NONE)
+ chip->cmd_ctrl(mtd, command,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
if (column != -1 || page_addr != -1) {
int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
@@ -866,6 +942,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
*/
switch (command) {
+ case NAND_CMD_NONE:
case NAND_CMD_CACHEDPROG:
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
@@ -1014,7 +1091,15 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
if (chip->dev_ready(mtd))
break;
} else {
- if (chip->read_byte(mtd) & NAND_STATUS_READY)
+ int ret;
+ u8 status;
+
+ ret = nand_read_data_op(chip, &status, sizeof(status),
+ true);
+ if (ret)
+ return;
+
+ if (status & NAND_STATUS_READY)
break;
}
mdelay(1);
@@ -1031,8 +1116,9 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
- int status;
unsigned long timeo = 400;
+ u8 status;
+ int ret;
/*
* Apply this short delay always to ensure that we do wait tWB in any
@@ -1040,7 +1126,9 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
*/
ndelay(100);
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ ret = nand_status_op(chip, NULL);
+ if (ret)
+ return ret;
if (in_interrupt() || oops_in_progress)
panic_nand_wait(mtd, chip, timeo);
@@ -1051,14 +1139,22 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
if (chip->dev_ready(mtd))
break;
} else {
- if (chip->read_byte(mtd) & NAND_STATUS_READY)
+ ret = nand_read_data_op(chip, &status,
+ sizeof(status), true);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_READY)
break;
}
cond_resched();
} while (time_before(jiffies, timeo));
}
- status = (int)chip->read_byte(mtd);
+ ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ if (ret)
+ return ret;
+
/* This can happen if in case of timeout or buggy dev_ready */
WARN_ON(!(status & NAND_STATUS_READY));
return status;
@@ -1076,7 +1172,6 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- const struct nand_data_interface *conf;
int ret;
if (!chip->setup_data_interface)
@@ -1096,8 +1191,8 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
* timings to timing mode 0.
*/
- conf = nand_get_default_data_interface();
- ret = chip->setup_data_interface(mtd, chipnr, conf);
+ onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
+ ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
if (ret)
pr_err("Failed to configure data interface to SDR timing mode 0\n");
@@ -1122,7 +1217,7 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
- if (!chip->setup_data_interface || !chip->data_interface)
+ if (!chip->setup_data_interface)
return 0;
/*
@@ -1143,7 +1238,7 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
goto err;
}
- ret = chip->setup_data_interface(mtd, chipnr, chip->data_interface);
+ ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
err:
return ret;
}
@@ -1183,21 +1278,19 @@ static int nand_init_data_interface(struct nand_chip *chip)
modes = GENMASK(chip->onfi_timing_mode_default, 0);
}
- chip->data_interface = kzalloc(sizeof(*chip->data_interface),
- GFP_KERNEL);
- if (!chip->data_interface)
- return -ENOMEM;
for (mode = fls(modes) - 1; mode >= 0; mode--) {
- ret = onfi_init_data_interface(chip, chip->data_interface,
- NAND_SDR_IFACE, mode);
+ ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
if (ret)
continue;
- /* Pass -1 to only */
+ /*
+ * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
+ * controller supports the requested timings.
+ */
ret = chip->setup_data_interface(mtd,
NAND_DATA_IFACE_CHECK_ONLY,
- chip->data_interface);
+ &chip->data_interface);
if (!ret) {
chip->onfi_timing_mode_default = mode;
break;
@@ -1207,21 +1300,1429 @@ static int nand_init_data_interface(struct nand_chip *chip)
return 0;
}
-static void nand_release_data_interface(struct nand_chip *chip)
+/**
+ * nand_fill_column_cycles - fill the column cycles of an address
+ * @chip: The NAND chip
+ * @addrs: Array of address cycles to fill
+ * @offset_in_page: The offset in the page
+ *
+ * Fills the first or the first two bytes of the @addrs field depending
+ * on the NAND bus width and the page size.
+ *
+ * Returns the number of cycles needed to encode the column, or a negative
+ * error code in case one of the arguments is invalid.
+ */
+static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
+ unsigned int offset_in_page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Make sure the offset is less than the actual page size. */
+ if (offset_in_page > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /*
+ * On small page NANDs, there's a dedicated command to access the OOB
+ * area, and the column address is relative to the start of the OOB
+ * area, not the start of the page. Adjust the address accordingly.
+ */
+ if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
+ offset_in_page -= mtd->writesize;
+
+ /*
+ * The offset in page is expressed in bytes, if the NAND bus is 16-bit
+ * wide, then it must be divided by 2.
+ */
+ if (chip->options & NAND_BUSWIDTH_16) {
+ if (WARN_ON(offset_in_page % 2))
+ return -EINVAL;
+
+ offset_in_page /= 2;
+ }
+
+ addrs[0] = offset_in_page;
+
+ /*
+ * Small page NANDs use 1 cycle for the columns, while large page NANDs
+ * need 2.
+ */
+ if (mtd->writesize <= 512)
+ return 1;
+
+ addrs[1] = offset_in_page >> 8;
+
+ return 2;
+}
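A short worked sketch of the encoding, assuming a 2 KiB page and an 8-bit bus (values illustrative):

	u8 addrs[2];
	int n;

	n = nand_fill_column_cycles(chip, addrs, 0x423);
	/* Large page: n == 2, addrs[0] == 0x23, addrs[1] == 0x04. */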
+
+static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[4];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ if (offset_in_page >= mtd->writesize)
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
+ else if (offset_in_page >= 256 &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ addrs[1] = page;
+ addrs[2] = page >> 8;
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ addrs[3] = page >> 16;
+ instrs[1].ctx.addr.naddrs++;
+ }
+
+ return nand_exec_op(chip, &op);
+}
+
+static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[5];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_ADDR(4, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ addrs[2] = page;
+ addrs[3] = page >> 8;
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ addrs[4] = page >> 16;
+ instrs[1].ctx.addr.naddrs++;
+ }
+
+ return nand_exec_op(chip, &op);
+}
+
+/**
+ * nand_read_page_op - Do a READ PAGE operation
+ * @chip: The NAND chip
+ * @page: page to read
+ * @offset_in_page: offset within the page
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ if (mtd->writesize > 512)
+ return nand_lp_exec_read_page_op(chip, page,
+ offset_in_page, buf,
+ len);
+
+ return nand_sp_exec_read_page_op(chip, page, offset_in_page,
+ buf, len);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, offset_in_page, page);
+ if (len)
+ chip->read_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_page_op);
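A minimal sketch of the two-step usage this enables: launch the READ PAGE with no data phase, then drain the page in chunks with nand_read_data_op() (buffer and error handling assumed):

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* Read the page back in two halves. */
	ret = nand_read_data_op(chip, buf, mtd->writesize / 2, false);
	if (ret)
		return ret;

	ret = nand_read_data_op(chip, buf + mtd->writesize / 2,
				mtd->writesize / 2, false);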
+
+/**
+ * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
+ * @chip: The NAND chip
+ * @page: parameter page to read
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ PARAMETER PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int i;
+ u8 *p = buf;
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_PARAM, 0),
+ NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_8BIT_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_PARAM, page, -1);
+ for (i = 0; i < len; i++)
+ p[i] = chip->read_byte(mtd);
+
+ return 0;
+}
+
+/**
+ * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
+ * @chip: The NAND chip
+ * @offset_in_page: offset within the page
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function issues a CHANGE READ COLUMN operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_change_read_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /* Small page NANDs do not support column change. */
+ if (mtd->writesize <= 512)
+ return -ENOTSUPP;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[2] = {};
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
+ NAND_OP_ADDR(2, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
+ PSEC_TO_NSEC(sdr->tCCS_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ instrs[3].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset_in_page, -1);
+ if (len)
+ chip->read_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_change_read_column_op);
+
+/**
+ * nand_read_oob_op - Do a READ OOB operation
+ * @chip: The NAND chip
+ * @page: page to read
+ * @offset_in_oob: offset within the OOB area
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ OOB operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_oob, void *buf, unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_oob + len > mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op)
+ return nand_read_page_op(chip, page,
+ mtd->writesize + offset_in_oob,
+ buf, len);
+
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, offset_in_oob, page);
+ if (len)
+ chip->read_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_oob_op);
+
+static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len, bool prog)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[5] = {};
+ struct nand_op_instr instrs[] = {
+ /*
+ * The first instruction will be dropped if we're dealing
+ * with a large page NAND and adjusted if we're dealing
+ * with a small page NAND and the page offset is > 255.
+ */
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_CMD(NAND_CMD_SEQIN, 0),
+ NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_DATA_OUT(len, buf, 0),
+ NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ int ret;
+ u8 status;
+
+ if (naddrs < 0)
+ return naddrs;
+
+ addrs[naddrs++] = page;
+ addrs[naddrs++] = page >> 8;
+ if (chip->options & NAND_ROW_ADDR_3)
+ addrs[naddrs++] = page >> 16;
+
+ instrs[2].ctx.addr.naddrs = naddrs;
+
+ /* Drop the last two instructions if we're not programming the page. */
+ if (!prog) {
+ op.ninstrs -= 2;
+ /* Also drop the DATA_OUT instruction if empty. */
+ if (!len)
+ op.ninstrs--;
+ }
+
+ if (mtd->writesize <= 512) {
+ /*
+ * Small pages need some more tweaking: we have to adjust the
+ * first instruction depending on the page offset we're trying
+ * to access.
+ */
+ if (offset_in_page >= mtd->writesize)
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
+ else if (offset_in_page >= 256 &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
+ } else {
+ /*
+ * Drop the first command if we're dealing with a large page
+ * NAND.
+ */
+ op.instrs++;
+ op.ninstrs--;
+ }
+
+ ret = nand_exec_op(chip, &op);
+ if (!prog || ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ return status;
+}
+
+/**
+ * nand_prog_page_begin_op - starts a PROG PAGE operation
+ * @chip: The NAND chip
+ * @page: page to write
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to write to the page
+ * @len: length of the buffer
+ *
+ * This function issues the first half of a PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op)
+ return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
+ len, false);
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
+
+ if (buf)
+ chip->write_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
+
+/**
+ * nand_prog_page_end_op - ends a PROG PAGE operation
+ * @chip: The NAND chip
+ *
+ * This function issues the second half of a PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_end_op(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+ u8 status;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_PAGEPROG,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ ret = chip->waitfunc(mtd, chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
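A minimal sketch chaining the two halves around a raw data phase, mirroring what nand_write_page_raw() becomes later in this patch:

	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	/* Append the OOB bytes before triggering the program. */
	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);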
+
+/**
+ * nand_prog_page_op - Do a full PROG PAGE operation
+ * @chip: The NAND chip
+ * @page: page to write
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to write to the page
+ * @len: length of the buffer
+ *
+ * This function issues a full PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len)
{
- kfree(chip->data_interface);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status;
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
+ len, true);
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
+ chip->write_buf(mtd, buf, len);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(nand_prog_page_op);
+
+/**
+ * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
+ * @chip: The NAND chip
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to send to the NAND
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function issues a CHANGE WRITE COLUMN operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_change_write_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page,
+ const void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /* Small page NANDs do not support column change. */
+ if (mtd->writesize <= 512)
+ return -ENOTSUPP;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[2];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RNDIN, 0),
+ NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
+ NAND_OP_DATA_OUT(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ instrs[2].ctx.data.force_8bit = force_8bit;
+
+ /* Drop the DATA_OUT instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset_in_page, -1);
+ if (len)
+ chip->write_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_change_write_column_op);
+
+/**
+ * nand_readid_op - Do a READID operation
+ * @chip: The NAND chip
+ * @addr: address cycle to pass after the READID command
+ * @buf: buffer used to store the ID
+ * @len: length of the buffer
+ *
+ * This function sends a READID command and reads back the ID returned by the
+ * NAND.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int i;
+ u8 *id = buf;
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READID, 0),
+ NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READID, addr, -1);
+
+ for (i = 0; i < len; i++)
+ id[i] = chip->read_byte(mtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_readid_op);
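A minimal sketch reading the two mandatory ID bytes (manufacturer and device):

	u8 id[2];
	int ret;

	ret = nand_readid_op(chip, 0, id, sizeof(id));
	if (ret)
		return ret;

	pr_debug("NAND ID: %02x %02x\n", id[0], id[1]);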
+
+/**
+ * nand_status_op - Do a STATUS operation
+ * @chip: The NAND chip
+ * @status: out variable to store the NAND status
+ *
+ * This function sends a STATUS command and reads back the status returned by
+ * the NAND.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_status_op(struct nand_chip *chip, u8 *status)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_STATUS,
+ PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(1, status, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ if (!status)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ if (status)
+ *status = chip->read_byte(mtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_status_op);
+
+/**
+ * nand_exit_status_op - Exit a STATUS operation
+ * @chip: The NAND chip
+ *
+ * This function sends a READ0 command to cancel the effect of the STATUS
+ * command to avoid reading only the status until a new read command is sent.
+ *
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_exit_status_op(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_exit_status_op);
+
+/**
+ * nand_erase_op - Do an erase operation
+ * @chip: The NAND chip
+ * @eraseblock: block to erase
+ *
+ * This function sends an ERASE command and waits for the NAND to be ready
+ * before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int page = eraseblock <<
+ (chip->phys_erase_shift - chip->page_shift);
+ int ret;
+ u8 status;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[3] = { page, page >> 8, page >> 16 };
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_ERASE1, 0),
+ NAND_OP_ADDR(2, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_ERASE2,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ instrs[1].ctx.addr.naddrs++;
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+
+ ret = chip->waitfunc(mtd, chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_erase_op);
+
+/**
+ * nand_set_features_op - Do a SET FEATURES operation
+ * @chip: The NAND chip
+ * @feature: feature id
+ * @data: 4 bytes of data
+ *
+ * This function sends a SET FEATURES command and waits for the NAND to be
+ * ready before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_set_features_op(struct nand_chip *chip, u8 feature,
+ const void *data)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const u8 *params = data;
+ int i, ret;
+ u8 status;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
+ NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ chip->write_byte(mtd, params[i]);
+
+ ret = chip->waitfunc(mtd, chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * nand_get_features_op - Do a GET FEATURES operation
+ * @chip: The NAND chip
+ * @feature: feature id
+ * @data: 4 bytes of data
+ *
+ * This function sends a GET FEATURES command and waits for the NAND to be
+ * ready before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_get_features_op(struct nand_chip *chip, u8 feature,
+ void *data)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *params = data;
+ int i;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
+ NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
+ data, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ params[i] = chip->read_byte(mtd);
+
+ return 0;
+}
+
+/**
+ * nand_reset_op - Do a reset operation
+ * @chip: The NAND chip
+ *
+ * This function sends a RESET command and waits for the NAND to be ready
+ * before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_reset_op(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_reset_op);
+
+/**
+ * nand_read_data_op - Read data from the NAND
+ * @chip: The NAND chip
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function does a raw data read on the bus. Usually used after launching
+ * another NAND operation like nand_read_page_op().
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ instrs[0].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ if (force_8bit) {
+ u8 *p = buf;
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ p[i] = chip->read_byte(mtd);
+ } else {
+ chip->read_buf(mtd, buf, len);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_data_op);
+
+/**
+ * nand_write_data_op - Write data to the NAND
+ * @chip: The NAND chip
+ * @buf: buffer containing the data to send on the bus
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function does a raw data write on the bus. Usually used after launching
+ * another NAND operation like nand_prog_page_begin_op().
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_write_data_op(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_DATA_OUT(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ instrs[0].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ if (force_8bit) {
+ const u8 *p = buf;
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ chip->write_byte(mtd, p[i]);
+ } else {
+ chip->write_buf(mtd, buf, len);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_write_data_op);
+
+/**
+ * struct nand_op_parser_ctx - Context used by the parser
+ * @instrs: array of all the instructions that must be addressed
+ * @ninstrs: length of the @instrs array
+ * @subop: Sub-operation to be passed to the NAND controller
+ *
+ * This structure is used by the core to split NAND operations into
+ * sub-operations that can be handled by the NAND controller.
+ */
+struct nand_op_parser_ctx {
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+ struct nand_subop subop;
+};
+
+/**
+ * nand_op_parser_must_split_instr - Checks if an instruction must be split
+ * @pat: the parser pattern element that matches @instr
+ * @instr: pointer to the instruction to check
+ * @start_offset: this is an in/out parameter. If @instr has already been
+ * split, then @start_offset is the offset from which to start
+ * (either an address cycle or an offset in the data buffer).
+ * Conversely, if the function returns true (ie. instr must be
+ * split), this parameter is updated to point to the first
+ * data/address cycle that has not been taken care of.
+ *
+ * Some NAND controllers are limited and cannot send X address cycles in a
+ * single operation, or cannot read/write more than Y bytes at the same time.
+ * In this case, split the instruction that does not fit in a single
+ * controller-operation into two or more chunks.
+ *
+ * Returns true if the instruction must be split, false otherwise.
+ * The @start_offset parameter is also updated to the offset at which the next
+ * bundle of instructions must start (for address and data instructions).
+ */
+static bool
+nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
+ const struct nand_op_instr *instr,
+ unsigned int *start_offset)
+{
+ switch (pat->type) {
+ case NAND_OP_ADDR_INSTR:
+ if (!pat->ctx.addr.maxcycles)
+ break;
+
+ if (instr->ctx.addr.naddrs - *start_offset >
+ pat->ctx.addr.maxcycles) {
+ *start_offset += pat->ctx.addr.maxcycles;
+ return true;
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ if (!pat->ctx.data.maxlen)
+ break;
+
+ if (instr->ctx.data.len - *start_offset >
+ pat->ctx.data.maxlen) {
+ *start_offset += pat->ctx.data.maxlen;
+ return true;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/**
+ * nand_op_parser_match_pat - Checks if a pattern matches the instructions
+ * remaining in the parser context
+ * @pat: the pattern to test
+ * @ctx: the parser context structure to match with the pattern @pat
+ *
+ * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
+ * Returns true if this is the case, false otherwise. When true is returned,
+ * @ctx->subop is updated with the set of instructions to be passed to the
+ * controller driver.
+ */
+static bool
+nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
+ struct nand_op_parser_ctx *ctx)
+{
+ unsigned int instr_offset = ctx->subop.first_instr_start_off;
+ const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
+ const struct nand_op_instr *instr = ctx->subop.instrs;
+ unsigned int i, ninstrs;
+
+ for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
+ /*
+ * The pattern instruction does not match the operation
+ * instruction. If the instruction is marked optional in the
+ * pattern definition, we skip the pattern element and continue
+ * to the next one. If the element is mandatory, there's no
+ * match and we can return false directly.
+ */
+ if (instr->type != pat->elems[i].type) {
+ if (!pat->elems[i].optional)
+ return false;
+
+ continue;
+ }
+
+ /*
+ * Now check the pattern element constraints. If the pattern is
+ * not able to handle the whole instruction in a single step,
+ * we have to split it.
+ * The instr_offset value comes back updated to point to the position
+ * where the instruction must be split (the start of the next subop
+ * chunk).
+ */
+ if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
+ &instr_offset)) {
+ ninstrs++;
+ i++;
+ break;
+ }
+
+ instr++;
+ ninstrs++;
+ instr_offset = 0;
+ }
+
+ /*
+ * This can happen if all instructions of a pattern are optional.
+ * Still, if there's not at least one instruction handled by this
+ * pattern, this is not a match, and we should try the next one (if
+ * any).
+ */
+ if (!ninstrs)
+ return false;
+
+ /*
+ * We had a match on the pattern head, but the pattern may be longer
+ * than the instructions we're asked to execute. We need to make sure
+ * there are no mandatory elements in the pattern tail.
+ */
+ for (; i < pat->nelems; i++) {
+ if (!pat->elems[i].optional)
+ return false;
+ }
+
+ /*
+ * We have a match: update the subop structure accordingly and return
+ * true.
+ */
+ ctx->subop.ninstrs = ninstrs;
+ ctx->subop.last_instr_end_off = instr_offset;
+
+ return true;
+}
+
+#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
+static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
+{
+ const struct nand_op_instr *instr;
+ char *prefix = " ";
+ unsigned int i;
+
+ pr_debug("executing subop:\n");
+
+ for (i = 0; i < ctx->ninstrs; i++) {
+ instr = &ctx->instrs[i];
+
+ if (instr == &ctx->subop.instrs[0])
+ prefix = " ->";
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ pr_debug("%sCMD [0x%02x]\n", prefix,
+ instr->ctx.cmd.opcode);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
+ instr->ctx.addr.naddrs,
+ instr->ctx.addr.naddrs < 64 ?
+ instr->ctx.addr.naddrs : 64,
+ instr->ctx.addr.addrs);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ pr_debug("%sDATA_IN [%d B%s]\n", prefix,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit ?
+ ", force 8-bit" : "");
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit ?
+ ", force 8-bit" : "");
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ pr_debug("%sWAITRDY [max %d ms]\n", prefix,
+ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+
+ if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
+ prefix = " ";
+ }
+}
+#else
+static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
+{
+ /* NOP */
+}
+#endif
+
+/**
+ * nand_op_parser_exec_op - exec_op parser
+ * @chip: the NAND chip
+ * @parser: patterns description provided by the controller driver
+ * @op: the NAND operation to address
+ * @check_only: when true, the function only checks if @op can be handled but
+ * does not execute the operation
+ *
+ * Helper function designed to ease integration of NAND controller drivers that
+ * only support a limited set of instruction sequences. The supported sequences
+ * are described in @parser, and the framework takes care of splitting @op into
+ * multiple sub-operations (if required) and passing them back to the ->exec()
+ * callback of the matching pattern if @check_only is set to false.
+ *
+ * NAND controller drivers should call this function from their own ->exec_op()
+ * implementation.
+ *
+ * Returns 0 on success, a negative error code otherwise. A failure can be
+ * caused by an unsupported operation (none of the supported patterns is able
+ * to handle the requested operation), or an error returned by the matching
+ * pattern->exec() hook.
+ */
+int nand_op_parser_exec_op(struct nand_chip *chip,
+ const struct nand_op_parser *parser,
+ const struct nand_operation *op, bool check_only)
+{
+ struct nand_op_parser_ctx ctx = {
+ .subop.instrs = op->instrs,
+ .instrs = op->instrs,
+ .ninstrs = op->ninstrs,
+ };
+ unsigned int i;
+
+ while (ctx.subop.instrs < op->instrs + op->ninstrs) {
+ int ret;
+
+ for (i = 0; i < parser->npatterns; i++) {
+ const struct nand_op_parser_pattern *pattern;
+
+ pattern = &parser->patterns[i];
+ if (!nand_op_parser_match_pat(pattern, &ctx))
+ continue;
+
+ nand_op_parser_trace(&ctx);
+
+ if (check_only)
+ break;
+
+ ret = pattern->exec(chip, &ctx.subop);
+ if (ret)
+ return ret;
+
+ break;
+ }
+
+ if (i == parser->npatterns) {
+ pr_debug("->exec_op() parser: pattern not found!\n");
+ return -ENOTSUPP;
+ }
+
+ /*
+ * Update the context structure by pointing to the start of the
+ * next subop.
+ */
+ ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
+ if (ctx.subop.last_instr_end_off)
+ ctx.subop.instrs -= 1;
+
+ ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
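A sketch of a controller driver wiring this up; the foo_* names and the address/data limits are illustrative, while the NAND_OP_PARSER_* macros come with this framework:

static int foo_data_read_exec(struct nand_chip *chip,
			      const struct nand_subop *subop)
{
	/* Program the controller to run @subop (details elided). */
	return 0;
}

/* One pattern: CMD [ADDR(<=5)] [CMD] [WAITRDY] [DATA_IN(<=512)]. */
static const struct nand_op_parser foo_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(foo_data_read_exec,
			       NAND_OP_PARSER_PAT_CMD_ELEM(false),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(false, 5),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
			       NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 512)));

static int foo_exec_op(struct nand_chip *chip,
		       const struct nand_operation *op, bool check_only)
{
	return nand_op_parser_exec_op(chip, &foo_op_parser, op, check_only);
}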
+
+static bool nand_instr_is_data(const struct nand_op_instr *instr)
+{
+ return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
+ instr->type == NAND_OP_DATA_OUT_INSTR);
+}
+
+static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ return subop && instr_idx < subop->ninstrs;
+}
+
+static int nand_subop_get_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (instr_idx)
+ return 0;
+
+ return subop->first_instr_start_off;
+}
+
+/**
+ * nand_subop_get_addr_start_off - Get the start offset in an address array
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->addr.addrs field of address instructions. This is wrong as address
+ * instructions might be split.
+ *
+ * Given an address instruction, returns the offset of the first cycle to issue.
+ */
+int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
+ return -EINVAL;
+
+ return nand_subop_get_start_off(subop, instr_idx);
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
+
+/**
+ * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->addr.naddrs field of an address instruction. This is wrong as instructions
+ * might be split.
+ *
+ * Given an address instruction, returns the number of address cycles to issue.
+ */
+int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ int start_off, end_off;
+
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
+ return -EINVAL;
+
+ start_off = nand_subop_get_addr_start_off(subop, instr_idx);
+
+ if (instr_idx == subop->ninstrs - 1 &&
+ subop->last_instr_end_off)
+ end_off = subop->last_instr_end_off;
+ else
+ end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
+
+ return end_off - start_off;
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
+
+/**
+ * nand_subop_get_data_start_off - Get the start offset in a data array
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->data.buf.{in,out} field of data instructions. This is wrong as data
+ * instructions might be split.
+ *
+ * Given a data instruction, returns the offset to start from.
+ */
+int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx]))
+ return -EINVAL;
+
+ return nand_subop_get_start_off(subop, instr_idx);
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
+
+/**
+ * nand_subop_get_data_len - Get the number of bytes to retrieve
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->data.len field of a data instruction. This is wrong as data instructions
+ * might be split.
+ *
+ * Returns the length of the chunk of data to send/receive.
+ */
+int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ int start_off = 0, end_off;
+
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx]))
+ return -EINVAL;
+
+ start_off = nand_subop_get_data_start_off(subop, instr_idx);
+
+ if (instr_idx == subop->ninstrs - 1 &&
+ subop->last_instr_end_off)
+ end_off = subop->last_instr_end_off;
+ else
+ end_off = subop->instrs[instr_idx].ctx.data.len;
+
+ return end_off - start_off;
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
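A sketch of a pattern ->exec() hook built on these accessors; foo_read_chunk() is a hypothetical controller primitive:

static int foo_exec_data_in(struct nand_chip *chip,
			    const struct nand_subop *subop)
{
	unsigned int i;

	for (i = 0; i < subop->ninstrs; i++) {
		const struct nand_op_instr *instr = &subop->instrs[i];

		if (instr->type != NAND_OP_DATA_IN_INSTR)
			continue;

		/*
		 * Offsets and lengths must come from the accessors: the
		 * instruction may be a split chunk.
		 */
		foo_read_chunk(chip,
			       instr->ctx.data.buf.in +
			       nand_subop_get_data_start_off(subop, i),
			       nand_subop_get_data_len(subop, i));
	}

	return 0;
}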
/**
* nand_reset - Reset and initialize a NAND device
* @chip: The NAND chip
* @chipnr: Internal die id
*
- * Returns 0 for success or negative error code otherwise
+ * Save the timings data structure, then apply SDR timings mode 0 (see
+ * nand_reset_data_interface for details), do the reset operation, and
+ * restore the previous timings.
+ *
+ * Returns 0 on success, a negative error code otherwise.
*/
int nand_reset(struct nand_chip *chip, int chipnr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_data_interface saved_data_intf = chip->data_interface;
int ret;
ret = nand_reset_data_interface(chip, chipnr);
@@ -1233,10 +2734,13 @@ int nand_reset(struct nand_chip *chip, int chipnr)
* interface settings, hence this weird ->select_chip() dance.
*/
chip->select_chip(mtd, chipnr);
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ ret = nand_reset_op(chip);
chip->select_chip(mtd, -1);
+ if (ret)
+ return ret;
chip->select_chip(mtd, chipnr);
+ chip->data_interface = saved_data_intf;
ret = nand_setup_data_interface(chip, chipnr);
chip->select_chip(mtd, -1);
if (ret)
@@ -1390,9 +2894,19 @@ EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- chip->read_buf(mtd, buf, mtd->writesize);
- if (oob_required)
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ int ret;
+
+ ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ if (ret)
+ return ret;
+
+ if (oob_required) {
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
EXPORT_SYMBOL(nand_read_page_raw);
@@ -1414,29 +2928,50 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
uint8_t *oob = chip->oob_poi;
- int steps, size;
+ int steps, size, ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
for (steps = chip->ecc.steps; steps > 0; steps--) {
- chip->read_buf(mtd, buf, eccsize);
+ ret = nand_read_data_op(chip, buf, eccsize, false);
+ if (ret)
+ return ret;
+
buf += eccsize;
if (chip->ecc.prepad) {
- chip->read_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
- chip->read_buf(mtd, oob, eccbytes);
+ ret = nand_read_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->read_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
}
size = mtd->oobsize - (oob - chip->oob_poi);
- if (size)
- chip->read_buf(mtd, oob, size);
+ if (size) {
+ ret = nand_read_data_op(chip, oob, size, false);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -1456,8 +2991,8 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
unsigned int max_bitflips = 0;
chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
@@ -1521,15 +3056,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
data_col_addr = start_step * chip->ecc.size;
/* If we read not a page aligned data */
- if (data_col_addr != 0)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
-
p = bufpoi + data_col_addr;
- chip->read_buf(mtd, p, datafrag_len);
+ ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
+ if (ret)
+ return ret;
/* Calculate ECC */
for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
- chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
+ chip->ecc.calculate(mtd, p, &chip->ecc.calc_buf[i]);
/*
* The performance is faster if we position offsets according to
@@ -1543,8 +3077,11 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
gaps = 1;
if (gaps) {
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
} else {
/*
* Send the command to read the particular ECC bytes take care
@@ -1558,12 +3095,15 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
(busw - 1))
aligned_len++;
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + aligned_pos, -1);
- chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
+ ret = nand_change_read_column_op(chip,
+ mtd->writesize + aligned_pos,
+ &chip->oob_poi[aligned_pos],
+ aligned_len, false);
+ if (ret)
+ return ret;
}
- ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
+ ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
chip->oob_poi, index, eccfrag_len);
if (ret)
return ret;
@@ -1572,13 +3112,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
int stat;
- stat = chip->ecc.correct(mtd, p,
- &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
+ stat = chip->ecc.correct(mtd, p, &chip->ecc.code_buf[i],
+ &chip->ecc.calc_buf[i]);
if (stat == -EBADMSG &&
(chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
/* check for empty pages with bitflips */
stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
- &chip->buffers->ecccode[i],
+ &chip->ecc.code_buf[i],
chip->ecc.bytes,
NULL, 0,
chip->ecc.strength);
@@ -1611,16 +3151,27 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
unsigned int max_bitflips = 0;
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
+
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
}
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
@@ -1674,14 +3225,18 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_code = chip->buffers->ecccode;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->ecc.code_buf;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
unsigned int max_bitflips = 0;
/* Read the OOB area first */
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ if (ret)
+ return ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
@@ -1692,7 +3247,11 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
int stat;
chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
+
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
@@ -1729,7 +3288,7 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- int i, eccsize = chip->ecc.size;
+ int ret, i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
@@ -1737,25 +3296,44 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *oob = chip->oob_poi;
unsigned int max_bitflips = 0;
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
int stat;
chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
+
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
if (chip->ecc.prepad) {
- chip->read_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
- chip->read_buf(mtd, oob, eccbytes);
+
+ ret = nand_read_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
stat = chip->ecc.correct(mtd, p, oob, NULL);
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->read_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
@@ -1779,8 +3357,11 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
/* Calculate remaining oob bytes */
i = mtd->oobsize - (oob - chip->oob_poi);
- if (i)
- chip->read_buf(mtd, oob, i);
+ if (i) {
+ ret = nand_read_data_op(chip, oob, i, false);
+ if (ret)
+ return ret;
+ }
return max_bitflips;
}
@@ -1894,16 +3475,13 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
/* Is the current page in the buffer? */
if (realpage != chip->pagebuf || oob) {
- bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
+ bufpoi = use_bufpoi ? chip->data_buf : buf;
if (use_bufpoi && aligned)
pr_debug("%s: using read bounce buffer for buf@%p\n",
__func__, buf);
read_retry:
- if (nand_standard_page_accessors(&chip->ecc))
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
-
/*
* Now read the page into the buffer. Absent an error,
* the read methods return max bitflips per ecc step.
@@ -1938,7 +3516,7 @@ read_retry:
/* Invalidate page cache */
chip->pagebuf = -1;
}
- memcpy(buf, chip->buffers->databuf + col, bytes);
+ memcpy(buf, chip->data_buf + col, bytes);
}
if (unlikely(oob)) {
@@ -1979,7 +3557,7 @@ read_retry:
buf += bytes;
max_bitflips = max_t(unsigned int, max_bitflips, ret);
} else {
- memcpy(buf, chip->buffers->databuf + col, bytes);
+ memcpy(buf, chip->data_buf + col, bytes);
buf += bytes;
max_bitflips = max_t(unsigned int, max_bitflips,
chip->pagebuf_bitflips);
@@ -2027,33 +3605,6 @@ read_retry:
}
/**
- * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
- * @mtd: MTD device structure
- * @from: offset to read from
- * @len: number of bytes to read
- * @retlen: pointer to variable to store the number of read bytes
- * @buf: the databuffer to put data
- *
- * Get hold of the chip and call nand_do_read.
- */
-static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, uint8_t *buf)
-{
- struct mtd_oob_ops ops;
- int ret;
-
- nand_get_device(mtd, FL_READING);
- memset(&ops, 0, sizeof(ops));
- ops.len = len;
- ops.datbuf = buf;
- ops.mode = MTD_OPS_PLACE_OOB;
- ret = nand_do_read_ops(mtd, from, &ops);
- *retlen = ops.retlen;
- nand_release_device(mtd);
- return ret;
-}
-
-/**
* nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
* @mtd: mtd info structure
* @chip: nand chip info structure
@@ -2061,9 +3612,7 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
*/
int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
EXPORT_SYMBOL(nand_read_oob_std);
@@ -2081,25 +3630,43 @@ int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
int eccsize = chip->ecc.size;
uint8_t *bufpoi = chip->oob_poi;
- int i, toread, sndrnd = 0, pos;
+ int i, toread, sndrnd = 0, pos, ret;
+
+ ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
+ if (ret)
+ return ret;
- chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
for (i = 0; i < chip->ecc.steps; i++) {
if (sndrnd) {
+ int ret;
+
pos = eccsize + i * (eccsize + chunk);
if (mtd->writesize > 512)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
+ ret = nand_change_read_column_op(chip, pos,
+ NULL, 0,
+ false);
else
- chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
+ ret = nand_read_page_op(chip, page, pos, NULL,
+ 0);
+
+ if (ret)
+ return ret;
} else
sndrnd = 1;
toread = min_t(int, length, chunk);
- chip->read_buf(mtd, bufpoi, toread);
+
+ ret = nand_read_data_op(chip, bufpoi, toread, false);
+ if (ret)
+ return ret;
+
bufpoi += toread;
length -= toread;
}
- if (length > 0)
- chip->read_buf(mtd, bufpoi, length);
+ if (length > 0) {
+ ret = nand_read_data_op(chip, bufpoi, length, false);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -2113,18 +3680,8 @@ EXPORT_SYMBOL(nand_read_oob_syndrome);
*/
int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
- int status = 0;
- const uint8_t *buf = chip->oob_poi;
- int length = mtd->oobsize;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
- chip->write_buf(mtd, buf, length);
- /* Send command to program the OOB data */
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
}
EXPORT_SYMBOL(nand_write_oob_std);
@@ -2140,7 +3697,7 @@ int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
{
int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
int eccsize = chip->ecc.size, length = mtd->oobsize;
- int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
+ int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
const uint8_t *bufpoi = chip->oob_poi;
/*
@@ -2154,7 +3711,10 @@ int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
} else
pos = eccsize;
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
+ ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; i < steps; i++) {
if (sndcmd) {
if (mtd->writesize <= 512) {
@@ -2163,28 +3723,40 @@ int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
len = eccsize;
while (len > 0) {
int num = min_t(int, len, 4);
- chip->write_buf(mtd, (uint8_t *)&fill,
- num);
+
+ ret = nand_write_data_op(chip, &fill,
+ num, false);
+ if (ret)
+ return ret;
+
len -= num;
}
} else {
pos = eccsize + i * (eccsize + chunk);
- chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
+ ret = nand_change_write_column_op(chip, pos,
+ NULL, 0,
+ false);
+ if (ret)
+ return ret;
}
} else
sndcmd = 1;
len = min_t(int, length, chunk);
- chip->write_buf(mtd, bufpoi, len);
+
+ ret = nand_write_data_op(chip, bufpoi, len, false);
+ if (ret)
+ return ret;
+
bufpoi += len;
length -= len;
}
- if (length > 0)
- chip->write_buf(mtd, bufpoi, length);
-
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
+ if (length > 0) {
+ ret = nand_write_data_op(chip, bufpoi, length, false);
+ if (ret)
+ return ret;
+ }
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_oob_syndrome);
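The write side mirrors the read side: nand_prog_page_begin_op() issues SEQIN plus the address cycles (optionally with a first chunk of data), nand_write_data_op() streams further chunks, and nand_prog_page_end_op() sends PAGEPROG, waits for the chip, and turns NAND_STATUS_FAIL into -EIO. A minimal sketch of the pairing (helper name hypothetical):

static int example_prog_page(struct nand_chip *chip, unsigned int page,
			     const void *buf, unsigned int len)
{
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	ret = nand_write_data_op(chip, buf, len, false);
	if (ret)
		return ret;

	/* PAGEPROG + wait; fails with -EIO on NAND_STATUS_FAIL. */
	return nand_prog_page_end_op(chip);
}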
@@ -2199,6 +3771,7 @@ EXPORT_SYMBOL(nand_write_oob_syndrome);
static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
+ unsigned int max_bitflips = 0;
int page, realpage, chipnr;
struct nand_chip *chip = mtd_to_nand(mtd);
struct mtd_ecc_stats stats;
@@ -2214,21 +3787,6 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
len = mtd_oobavail(mtd, ops);
- if (unlikely(ops->ooboffs >= len)) {
- pr_debug("%s: attempt to start read outside oob\n",
- __func__);
- return -EINVAL;
- }
-
- /* Do not allow reads past end of device */
- if (unlikely(from >= mtd->size ||
- ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
- (from >> chip->page_shift)) * len)) {
- pr_debug("%s: attempt to read beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
chipnr = (int)(from >> chip->chip_shift);
chip->select_chip(mtd, chipnr);
@@ -2256,6 +3814,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
nand_wait_ready(mtd);
}
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+
readlen -= len;
if (!readlen)
break;
@@ -2281,7 +3841,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
- return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+ return max_bitflips;
}
/**
@@ -2299,13 +3859,6 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
ops->retlen = 0;
- /* Do not allow reads past end of device */
- if (ops->datbuf && (from + ops->len) > mtd->size) {
- pr_debug("%s: attempt to read beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
if (ops->mode != MTD_OPS_PLACE_OOB &&
ops->mode != MTD_OPS_AUTO_OOB &&
ops->mode != MTD_OPS_RAW)
@@ -2336,11 +3889,20 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
- if (oob_required)
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ int ret;
- return 0;
+ ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ if (ret)
+ return ret;
+
+ if (oob_required) {
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_page_raw);
@@ -2362,31 +3924,52 @@ static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
uint8_t *oob = chip->oob_poi;
- int steps, size;
+ int steps, size, ret;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
for (steps = chip->ecc.steps; steps > 0; steps--) {
- chip->write_buf(mtd, buf, eccsize);
+ ret = nand_write_data_op(chip, buf, eccsize, false);
+ if (ret)
+ return ret;
+
buf += eccsize;
if (chip->ecc.prepad) {
- chip->write_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
- chip->write_buf(mtd, oob, eccbytes);
+ ret = nand_write_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->write_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
}
size = mtd->oobsize - (oob - chip->oob_poi);
- if (size)
- chip->write_buf(mtd, oob, size);
+ if (size) {
+ ret = nand_write_data_op(chip, oob, size, false);
+ if (ret)
+ return ret;
+ }
- return 0;
+ return nand_prog_page_end_op(chip);
}
/**
* nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
@@ -2403,7 +3986,7 @@ static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
const uint8_t *p = buf;
/* Software ECC calculation */
@@ -2433,12 +4016,20 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
const uint8_t *p = buf;
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
- chip->write_buf(mtd, p, eccsize);
+
+ ret = nand_write_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
}
@@ -2447,9 +4038,11 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
if (ret)
return ret;
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
- return 0;
+ return nand_prog_page_end_op(chip);
}
@@ -2469,7 +4062,7 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
int oob_required, int page)
{
uint8_t *oob_buf = chip->oob_poi;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
int ecc_steps = chip->ecc.steps;
@@ -2478,12 +4071,18 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
int oob_bytes = mtd->oobsize / ecc_steps;
int step, ret;
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (step = 0; step < ecc_steps; step++) {
/* configure controller for WRITE access */
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
/* write data (untouched subpages already masked by 0xFF) */
- chip->write_buf(mtd, buf, ecc_size);
+ ret = nand_write_data_op(chip, buf, ecc_size, false);
+ if (ret)
+ return ret;
		/* mask ECC of untouched subpages by padding 0xFF */
if ((step < start_step) || (step > end_step))
@@ -2503,16 +4102,18 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
	/*
	 * Copy the calculated ECC for the whole page to chip->oob_poi; this
	 * includes the 0xFF mask values for unwritten subpages.
	 */
- ecc_calc = chip->buffers->ecccalc;
+ ecc_calc = chip->ecc.calc_buf;
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
return ret;
/* write OOB buffer to NAND device */
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
- return 0;
+ return nand_prog_page_end_op(chip);
}
@@ -2537,33 +4138,55 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
int eccsteps = chip->ecc.steps;
const uint8_t *p = buf;
uint8_t *oob = chip->oob_poi;
+ int ret;
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
- chip->write_buf(mtd, p, eccsize);
+
+ ret = nand_write_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
if (chip->ecc.prepad) {
- chip->write_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
chip->ecc.calculate(mtd, p, oob);
- chip->write_buf(mtd, oob, eccbytes);
+
+ ret = nand_write_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->write_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
}
/* Calculate remaining oob bytes */
i = mtd->oobsize - (oob - chip->oob_poi);
- if (i)
- chip->write_buf(mtd, oob, i);
+ if (i) {
+ ret = nand_write_data_op(chip, oob, i, false);
+ if (ret)
+ return ret;
+ }
- return 0;
+ return nand_prog_page_end_op(chip);
}
/**
@@ -2589,9 +4212,6 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
else
subpage = 0;
- if (nand_standard_page_accessors(&chip->ecc))
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-
if (unlikely(raw))
status = chip->ecc.write_page_raw(mtd, chip, buf,
oob_required, page);
@@ -2605,14 +4225,6 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (status < 0)
return status;
- if (nand_standard_page_accessors(&chip->ecc)) {
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
- }
-
return 0;
}
@@ -2737,9 +4349,9 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
if (part_pagewr)
bytes = min_t(int, bytes - column, writelen);
chip->pagebuf = -1;
- memset(chip->buffers->databuf, 0xff, mtd->writesize);
- memcpy(&chip->buffers->databuf[column], buf, bytes);
- wbuf = chip->buffers->databuf;
+ memset(chip->data_buf, 0xff, mtd->writesize);
+ memcpy(&chip->data_buf[column], buf, bytes);
+ wbuf = chip->data_buf;
}
if (unlikely(oob)) {
@@ -2822,33 +4434,6 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
}
/**
- * nand_write - [MTD Interface] NAND write with ECC
- * @mtd: MTD device structure
- * @to: offset to write to
- * @len: number of bytes to write
- * @retlen: pointer to variable to store the number of written bytes
- * @buf: the data to write
- *
- * NAND write with ECC.
- */
-static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const uint8_t *buf)
-{
- struct mtd_oob_ops ops;
- int ret;
-
- nand_get_device(mtd, FL_WRITING);
- memset(&ops, 0, sizeof(ops));
- ops.len = len;
- ops.datbuf = (uint8_t *)buf;
- ops.mode = MTD_OPS_PLACE_OOB;
- ret = nand_do_write_ops(mtd, to, &ops);
- *retlen = ops.retlen;
- nand_release_device(mtd);
- return ret;
-}
-
-/**
* nand_do_write_oob - [MTD Interface] NAND write out-of-band
* @mtd: MTD device structure
* @to: offset to write to
@@ -2874,22 +4459,6 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
return -EINVAL;
}
- if (unlikely(ops->ooboffs >= len)) {
- pr_debug("%s: attempt to start write outside oob\n",
- __func__);
- return -EINVAL;
- }
-
- /* Do not allow write past end of device */
- if (unlikely(to >= mtd->size ||
- ops->ooboffs + ops->ooblen >
- ((mtd->size >> chip->page_shift) -
- (to >> chip->page_shift)) * len)) {
- pr_debug("%s: attempt to write beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
chipnr = (int)(to >> chip->chip_shift);
/*
@@ -2945,13 +4514,6 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
ops->retlen = 0;
- /* Do not allow writes past end of device */
- if (ops->datbuf && (to + ops->len) > mtd->size) {
- pr_debug("%s: attempt to write beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
nand_get_device(mtd, FL_WRITING);
switch (ops->mode) {
@@ -2984,11 +4546,12 @@ out:
static int single_erase(struct mtd_info *mtd, int page)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ unsigned int eraseblock;
+
/* Send commands to erase a block */
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
- chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+ eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);
- return chip->waitfunc(mtd, chip);
+ return nand_erase_op(chip, eraseblock);
}
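The shift converts a page address into the eraseblock address that nand_erase_op() expects. A worked example with hypothetical geometry:

	/*
	 * Hypothetical geometry: 2 KiB pages (page_shift = 11) and
	 * 128 KiB blocks (phys_erase_shift = 17), i.e. 64 pages per block.
	 */
	unsigned int eraseblock = 129 >> (17 - 11);	/* page 129 -> block 2 */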
/**
@@ -3072,7 +4635,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
status = chip->erase(mtd, page & chip->pagemask);
/* See if block erase succeeded */
- if (status & NAND_STATUS_FAIL) {
+ if (status) {
pr_debug("%s: failed erase, page 0x%08x\n",
__func__, page);
instr->state = MTD_ERASE_FAILED;
@@ -3215,22 +4778,12 @@ static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
int addr, uint8_t *subfeature_param)
{
- int status;
- int i;
-
if (!chip->onfi_version ||
!(le16_to_cpu(chip->onfi_params.opt_cmd)
& ONFI_OPT_CMD_SET_GET_FEATURES))
return -EINVAL;
- chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
- for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- chip->write_byte(mtd, subfeature_param[i]);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
- return 0;
+ return nand_set_features_op(chip, addr, subfeature_param);
}
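Callers now pass a feature address plus an ONFI_SUBFEATURE_PARAM_LEN-byte parameter array and let the helper drive the command sequence. A sketch of selecting an ONFI timing mode from within nand_base.c (the mode value is illustrative):

	u8 subfeature[ONFI_SUBFEATURE_PARAM_LEN] = { 3 };	/* timing mode 3 */
	int ret;

	ret = nand_set_features_op(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				   subfeature);
	if (ret)
		return ret;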
/**
@@ -3243,17 +4796,12 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
int addr, uint8_t *subfeature_param)
{
- int i;
-
if (!chip->onfi_version ||
!(le16_to_cpu(chip->onfi_params.opt_cmd)
& ONFI_OPT_CMD_SET_GET_FEATURES))
return -EINVAL;
- chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
- for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- *subfeature_param++ = chip->read_byte(mtd);
- return 0;
+ return nand_get_features_op(chip, addr, subfeature_param);
}
/**
@@ -3319,7 +4867,7 @@ static void nand_set_defaults(struct nand_chip *chip)
chip->chip_delay = 20;
	/* Check if a user-supplied command function was given */
- if (chip->cmdfunc == NULL)
+ if (!chip->cmdfunc && !chip->exec_op)
chip->cmdfunc = nand_command;
	/* Check if a user-supplied wait function was given */
@@ -3396,12 +4944,11 @@ static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
struct nand_onfi_params *p)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
struct onfi_ext_param_page *ep;
struct onfi_ext_section *s;
struct onfi_ext_ecc_info *ecc;
uint8_t *cursor;
- int ret = -EINVAL;
+ int ret;
int len;
int i;
@@ -3411,14 +4958,18 @@ static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
return -ENOMEM;
/* Send our own NAND_CMD_PARAM. */
- chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
+ ret = nand_read_param_page_op(chip, 0, NULL, 0);
+ if (ret)
+ goto ext_out;
/* Use the Change Read Column command to skip the ONFI param pages. */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
- sizeof(*p) * p->num_of_param_pages , -1);
+ ret = nand_change_read_column_op(chip,
+ sizeof(*p) * p->num_of_param_pages,
+ ep, len, true);
+ if (ret)
+ goto ext_out;
- /* Read out the Extended Parameter Page. */
- chip->read_buf(mtd, (uint8_t *)ep, len);
+ ret = -EINVAL;
if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
!= le16_to_cpu(ep->crc))) {
pr_debug("fail in the CRC.\n");
@@ -3471,19 +5022,23 @@ static int nand_flash_detect_onfi(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_onfi_params *p = &chip->onfi_params;
- int i, j;
- int val;
+ char id[4];
+ int i, ret, val;
/* Try ONFI for unknown chip or LP */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
- if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
- chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
+ ret = nand_readid_op(chip, 0x20, id, sizeof(id));
+ if (ret || strncmp(id, "ONFI", 4))
+ return 0;
+
+ ret = nand_read_param_page_op(chip, 0, NULL, 0);
+ if (ret)
return 0;
- chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
for (i = 0; i < 3; i++) {
- for (j = 0; j < sizeof(*p); j++)
- ((uint8_t *)p)[j] = chip->read_byte(mtd);
+ ret = nand_read_data_op(chip, p, sizeof(*p), true);
+ if (ret)
+ return 0;
+
if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
le16_to_cpu(p->crc)) {
break;
@@ -3574,20 +5129,22 @@ static int nand_flash_detect_jedec(struct nand_chip *chip)
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_jedec_params *p = &chip->jedec_params;
struct jedec_ecc_info *ecc;
- int val;
- int i, j;
+ char id[5];
+ int i, val, ret;
/* Try JEDEC for unknown chip or LP */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
- if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
- chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
- chip->read_byte(mtd) != 'C')
+ ret = nand_readid_op(chip, 0x40, id, sizeof(id));
+ if (ret || strncmp(id, "JEDEC", sizeof(id)))
+ return 0;
+
+ ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
+ if (ret)
return 0;
- chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
for (i = 0; i < 3; i++) {
- for (j = 0; j < sizeof(*p); j++)
- ((uint8_t *)p)[j] = chip->read_byte(mtd);
+ ret = nand_read_data_op(chip, p, sizeof(*p), true);
+ if (ret)
+ return 0;
if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
le16_to_cpu(p->crc))
@@ -3866,8 +5423,7 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
const struct nand_manufacturer *manufacturer;
struct mtd_info *mtd = nand_to_mtd(chip);
- int busw;
- int i;
+ int busw, ret;
u8 *id_data = chip->id.data;
u8 maf_id, dev_id;
@@ -3875,17 +5431,21 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
* Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
* after power-up.
*/
- nand_reset(chip, 0);
+ ret = nand_reset(chip, 0);
+ if (ret)
+ return ret;
/* Select the device */
chip->select_chip(mtd, 0);
/* Send the command for reading device ID */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ ret = nand_readid_op(chip, 0, id_data, 2);
+ if (ret)
+ return ret;
/* Read manufacturer and device IDs */
- maf_id = chip->read_byte(mtd);
- dev_id = chip->read_byte(mtd);
+ maf_id = id_data[0];
+ dev_id = id_data[1];
/*
	 * Try again to make sure, as on some systems the bus-hold or other
@@ -3894,11 +5454,10 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
* not match, ignore the device completely.
*/
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
-
/* Read entire ID string */
- for (i = 0; i < ARRAY_SIZE(chip->id.data); i++)
- id_data[i] = chip->read_byte(mtd);
+ ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
+ if (ret)
+ return ret;
if (id_data[0] != maf_id || id_data[1] != dev_id) {
pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
@@ -4190,6 +5749,9 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
struct nand_chip *chip = mtd_to_nand(mtd);
int ret;
+ /* Enforce the right timings for reset/detection */
+ onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
+
ret = nand_dt_init(chip);
if (ret)
return ret;
@@ -4197,15 +5759,21 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
if (!mtd->name && mtd->dev.parent)
mtd->name = dev_name(mtd->dev.parent);
- if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
+ /*
+ * ->cmdfunc() is legacy and will only be used if ->exec_op() is not
+ * populated.
+ */
+ if (!chip->exec_op) {
/*
- * Default functions assigned for chip_select() and
- * cmdfunc() both expect cmd_ctrl() to be populated,
- * so we need to check that that's the case
+ * Default functions assigned for ->cmdfunc() and
+ * ->select_chip() both expect ->cmd_ctrl() to be populated.
*/
- pr_err("chip.cmd_ctrl() callback is not provided");
- return -EINVAL;
+ if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
+ pr_err("->cmd_ctrl() should be provided\n");
+ return -EINVAL;
+ }
}
+
/* Set the default functions */
nand_set_defaults(chip);
@@ -4225,15 +5793,16 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
/* Check for a chip array */
for (i = 1; i < maxchips; i++) {
+ u8 id[2];
+
/* See comment in nand_get_flash_type for reset */
nand_reset(chip, i);
chip->select_chip(mtd, i);
/* Send the command for reading device ID */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ nand_readid_op(chip, 0, id, sizeof(id));
/* Read manufacturer and device IDs */
- if (nand_maf_id != chip->read_byte(mtd) ||
- nand_dev_id != chip->read_byte(mtd)) {
+ if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
chip->select_chip(mtd, -1);
break;
}
@@ -4600,26 +6169,6 @@ static bool nand_ecc_strength_good(struct mtd_info *mtd)
return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
}
-static bool invalid_ecc_page_accessors(struct nand_chip *chip)
-{
- struct nand_ecc_ctrl *ecc = &chip->ecc;
-
- if (nand_standard_page_accessors(ecc))
- return false;
-
- /*
- * NAND_ECC_CUSTOM_PAGE_ACCESS flag is set, make sure the NAND
- * controller driver implements all the page accessors because
- * default helpers are not suitable when the core does not
- * send the READ0/PAGEPROG commands.
- */
- return (!ecc->read_page || !ecc->write_page ||
- !ecc->read_page_raw || !ecc->write_page_raw ||
- (NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
- (NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
- ecc->hwctl && ecc->calculate));
-}
-
/**
* nand_scan_tail - [NAND Interface] Scan for the NAND device
* @mtd: MTD device structure
@@ -4632,7 +6181,6 @@ int nand_scan_tail(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- struct nand_buffers *nbuf = NULL;
int ret, i;
/* New bad blocks should be marked in OOB, flash-based BBT, or both */
@@ -4641,39 +6189,9 @@ int nand_scan_tail(struct mtd_info *mtd)
return -EINVAL;
}
- if (invalid_ecc_page_accessors(chip)) {
- pr_err("Invalid ECC page accessors setup\n");
- return -EINVAL;
- }
-
- if (!(chip->options & NAND_OWN_BUFFERS)) {
- nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
- if (!nbuf)
- return -ENOMEM;
-
- nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
- if (!nbuf->ecccalc) {
- ret = -ENOMEM;
- goto err_free_nbuf;
- }
-
- nbuf->ecccode = kmalloc(mtd->oobsize, GFP_KERNEL);
- if (!nbuf->ecccode) {
- ret = -ENOMEM;
- goto err_free_nbuf;
- }
-
- nbuf->databuf = kmalloc(mtd->writesize + mtd->oobsize,
- GFP_KERNEL);
- if (!nbuf->databuf) {
- ret = -ENOMEM;
- goto err_free_nbuf;
- }
-
- chip->buffers = nbuf;
- } else if (!chip->buffers) {
+ chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
+ if (!chip->data_buf)
return -ENOMEM;
- }
/*
* FIXME: some NAND manufacturer drivers expect the first die to be
@@ -4685,10 +6203,10 @@ int nand_scan_tail(struct mtd_info *mtd)
ret = nand_manufacturer_init(chip);
chip->select_chip(mtd, -1);
if (ret)
- goto err_free_nbuf;
+ goto err_free_buf;
/* Set the internal oob buffer location, just after the page data */
- chip->oob_poi = chip->buffers->databuf + mtd->writesize;
+ chip->oob_poi = chip->data_buf + mtd->writesize;
/*
* If no default placement scheme is given, select an appropriate one.
@@ -4836,6 +6354,15 @@ int nand_scan_tail(struct mtd_info *mtd)
goto err_nand_manuf_cleanup;
}
+ if (ecc->correct || ecc->calculate) {
+ ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ if (!ecc->calc_buf || !ecc->code_buf) {
+ ret = -ENOMEM;
+ goto err_nand_manuf_cleanup;
+ }
+ }
+
/* For many systems, the standard OOB write also works for raw */
if (!ecc->read_oob_raw)
ecc->read_oob_raw = ecc->read_oob;
@@ -4917,8 +6444,6 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd->_erase = nand_erase;
mtd->_point = NULL;
mtd->_unpoint = NULL;
- mtd->_read = nand_read;
- mtd->_write = nand_write;
mtd->_panic_write = panic_nand_write;
mtd->_read_oob = nand_read_oob;
mtd->_write_oob = nand_write_oob;
@@ -4954,7 +6479,7 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->select_chip(mtd, -1);
if (ret)
- goto err_nand_data_iface_cleanup;
+ goto err_nand_manuf_cleanup;
}
/* Check, if we should skip the bad block table scan */
@@ -4964,23 +6489,18 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Build bad block table */
ret = chip->scan_bbt(mtd);
if (ret)
- goto err_nand_data_iface_cleanup;
+ goto err_nand_manuf_cleanup;
return 0;
-err_nand_data_iface_cleanup:
- nand_release_data_interface(chip);
err_nand_manuf_cleanup:
nand_manufacturer_cleanup(chip);
-err_free_nbuf:
- if (nbuf) {
- kfree(nbuf->databuf);
- kfree(nbuf->ecccode);
- kfree(nbuf->ecccalc);
- kfree(nbuf);
- }
+err_free_buf:
+ kfree(chip->data_buf);
+ kfree(ecc->code_buf);
+ kfree(ecc->calc_buf);
return ret;
}
@@ -5028,16 +6548,11 @@ void nand_cleanup(struct nand_chip *chip)
chip->ecc.algo == NAND_ECC_BCH)
nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
- nand_release_data_interface(chip);
-
/* Free bad block table memory */
kfree(chip->bbt);
- if (!(chip->options & NAND_OWN_BUFFERS) && chip->buffers) {
- kfree(chip->buffers->databuf);
- kfree(chip->buffers->ecccode);
- kfree(chip->buffers->ecccalc);
- kfree(chip->buffers);
- }
+ kfree(chip->data_buf);
+ kfree(chip->ecc.code_buf);
+ kfree(chip->ecc.calc_buf);
/* Free bad block descriptor memory */
if (chip->badblock_pattern && chip->badblock_pattern->options
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 2915b67..3609285 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -898,7 +898,7 @@ static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *b
{
struct nand_chip *this = mtd_to_nand(mtd);
- return create_bbt(mtd, this->buffers->databuf, bd, -1);
+ return create_bbt(mtd, this->data_buf, bd, -1);
}
/**
diff --git a/drivers/mtd/nand/nand_hynix.c b/drivers/mtd/nand/nand_hynix.c
index 985751e..d542908a 100644
--- a/drivers/mtd/nand/nand_hynix.c
+++ b/drivers/mtd/nand/nand_hynix.c
@@ -67,15 +67,43 @@ struct hynix_read_retry_otp {
static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
{
+ u8 jedecid[5] = { };
+ int ret;
+
+ ret = nand_readid_op(chip, 0x40, jedecid, sizeof(jedecid));
+ if (ret)
+ return false;
+
+ return !strncmp("JEDEC", jedecid, sizeof(jedecid));
+}
+
+static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(cmd, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, cmd, -1, -1);
+
+ return 0;
+}
+
+static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
+{
struct mtd_info *mtd = nand_to_mtd(chip);
- u8 jedecid[6] = { };
- int i = 0;
+ u16 column = ((u16)addr << 8) | addr;
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
- for (i = 0; i < 5; i++)
- jedecid[i] = chip->read_byte(mtd);
+ chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
+ chip->write_byte(mtd, val);
- return !strcmp("JEDEC", jedecid);
+ return 0;
}
static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
@@ -83,14 +111,15 @@ static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
struct nand_chip *chip = mtd_to_nand(mtd);
struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
const u8 *values;
- int status;
- int i;
+ int i, ret;
values = hynix->read_retry->values +
(retry_mode * hynix->read_retry->nregs);
/* Enter 'Set Hynix Parameters' mode */
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, -1, -1);
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
/*
* Configure the NAND in the requested read-retry mode.
@@ -102,21 +131,14 @@ static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
* probably tweaked at production in this case).
*/
for (i = 0; i < hynix->read_retry->nregs; i++) {
- int column = hynix->read_retry->regs[i];
-
- column |= column << 8;
- chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
- chip->write_byte(mtd, values[i]);
+ ret = hynix_nand_reg_write_op(chip, hynix->read_retry->regs[i],
+ values[i]);
+ if (ret)
+ return ret;
}
/* Apply the new settings. */
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
-
- return 0;
+ return hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
}
/**
@@ -172,40 +194,63 @@ static int hynix_read_rr_otp(struct nand_chip *chip,
const struct hynix_read_retry_otp *info,
void *buf)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- int i;
+ int i, ret;
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ ret = nand_reset_op(chip);
+ if (ret)
+ return ret;
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, -1, -1);
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
for (i = 0; i < info->nregs; i++) {
- int column = info->regs[i];
-
- column |= column << 8;
- chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
- chip->write_byte(mtd, info->values[i]);
+ ret = hynix_nand_reg_write_op(chip, info->regs[i],
+ info->values[i]);
+ if (ret)
+ return ret;
}
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
+ if (ret)
+ return ret;
/* Sequence to enter OTP mode? */
- chip->cmdfunc(mtd, 0x17, -1, -1);
- chip->cmdfunc(mtd, 0x04, -1, -1);
- chip->cmdfunc(mtd, 0x19, -1, -1);
+ ret = hynix_nand_cmd_op(chip, 0x17);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, 0x4);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, 0x19);
+ if (ret)
+ return ret;
/* Now read the page */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, info->page);
- chip->read_buf(mtd, buf, info->size);
+ ret = nand_read_page_op(chip, info->page, 0, buf, info->size);
+ if (ret)
+ return ret;
/* Put everything back to normal */
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, 0x38, -1);
- chip->write_byte(mtd, 0x0);
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, -1);
+ ret = nand_reset_op(chip);
+ if (ret)
+ return ret;
- return 0;
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_reg_write_op(chip, 0x38, 0);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
+ if (ret)
+ return ret;
+
+ return nand_read_page_op(chip, 0, 0, NULL, 0);
}
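hynix_nand_cmd_op() above is the smallest possible ->exec_op() operation: a single command cycle. Address and data instructions compose the same way; a sketch of how the register write could be expressed on an ->exec_op() controller (the instruction mix is an assumption based on the vendor protocol):

static int example_hynix_reg_write(struct nand_chip *chip, u8 addr, u8 val)
{
	struct nand_op_instr instrs[] = {
		NAND_OP_ADDR(1, &addr, 0),
		NAND_OP_8BIT_DATA_OUT(1, &val, 0),
	};
	struct nand_operation op = NAND_OPERATION(instrs);

	return nand_exec_op(chip, &op);
}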
#define NAND_HYNIX_1XNM_RR_COUNT_OFFS 0
diff --git a/drivers/mtd/nand/nand_micron.c b/drivers/mtd/nand/nand_micron.c
index abf6a3c..02e109a 100644
--- a/drivers/mtd/nand/nand_micron.c
+++ b/drivers/mtd/nand/nand_micron.c
@@ -117,16 +117,28 @@ micron_nand_read_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required,
int page)
{
- int status;
- int max_bitflips = 0;
+ u8 status;
+ int ret, max_bitflips = 0;
- micron_nand_on_die_ecc_setup(chip, true);
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ goto out;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ goto out;
+
+ ret = nand_exit_status_op(chip);
+ if (ret)
+ goto out;
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- status = chip->read_byte(mtd);
if (status & NAND_STATUS_FAIL)
mtd->ecc_stats.failed++;
+
/*
* The internal ECC doesn't tell us the number of bitflips
* that have been corrected, but tells us if it recommends to
@@ -137,13 +149,15 @@ micron_nand_read_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
else if (status & NAND_STATUS_WRITE_RECOMMENDED)
max_bitflips = chip->ecc.strength;
- chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
-
- nand_read_page_raw(mtd, chip, buf, oob_required, page);
+ ret = nand_read_data_op(chip, buf, mtd->writesize, false);
+ if (!ret && oob_required)
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+out:
micron_nand_on_die_ecc_setup(chip, false);
- return max_bitflips;
+ return ret ? ret : max_bitflips;
}
static int
@@ -151,46 +165,16 @@ micron_nand_write_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required,
int page)
{
- int status;
-
- micron_nand_on_die_ecc_setup(chip, true);
+ int ret;
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- nand_write_page_raw(mtd, chip, buf, oob_required, page);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return ret;
+ ret = nand_write_page_raw(mtd, chip, buf, oob_required, page);
micron_nand_on_die_ecc_setup(chip, false);
- return status & NAND_STATUS_FAIL ? -EIO : 0;
-}
-
-static int
-micron_nand_read_page_raw_on_die_ecc(struct mtd_info *mtd,
- struct nand_chip *chip,
- uint8_t *buf, int oob_required,
- int page)
-{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
- nand_read_page_raw(mtd, chip, buf, oob_required, page);
-
- return 0;
-}
-
-static int
-micron_nand_write_page_raw_on_die_ecc(struct mtd_info *mtd,
- struct nand_chip *chip,
- const uint8_t *buf, int oob_required,
- int page)
-{
- int status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- nand_write_page_raw(mtd, chip, buf, oob_required, page);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return ret;
}
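nand_status_op() issues NAND_CMD_STATUS and reads back one status byte; nand_exit_status_op() then sends a plain READ0 so subsequent data accesses return array data again — the step the old code performed with the bare NAND_CMD_READ0. A sketch of the sequence:

	u8 status;
	int ret;

	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	/* Leave status mode, or data reads keep returning the status byte. */
	ret = nand_exit_status_op(chip);
	if (ret)
		return ret;

	if (status & NAND_STATUS_FAIL)
		mtd->ecc_stats.failed++;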
enum {
@@ -285,17 +269,14 @@ static int micron_nand_init(struct nand_chip *chip)
return -EINVAL;
}
- chip->ecc.options = NAND_ECC_CUSTOM_PAGE_ACCESS;
chip->ecc.bytes = 8;
chip->ecc.size = 512;
chip->ecc.strength = 4;
chip->ecc.algo = NAND_ECC_BCH;
chip->ecc.read_page = micron_nand_read_page_on_die_ecc;
chip->ecc.write_page = micron_nand_write_page_on_die_ecc;
- chip->ecc.read_page_raw =
- micron_nand_read_page_raw_on_die_ecc;
- chip->ecc.write_page_raw =
- micron_nand_write_page_raw_on_die_ecc;
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ chip->ecc.write_page_raw = nand_write_page_raw;
mtd_set_ooblayout(mtd, &micron_nand_on_die_ooblayout_ops);
}
diff --git a/drivers/mtd/nand/nand_samsung.c b/drivers/mtd/nand/nand_samsung.c
index d348f01..ef022f6 100644
--- a/drivers/mtd/nand/nand_samsung.c
+++ b/drivers/mtd/nand/nand_samsung.c
@@ -91,6 +91,25 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
}
} else {
nand_decode_ext_id(chip);
+
+ if (nand_is_slc(chip)) {
+ switch (chip->id.data[1]) {
+ /* K9F4G08U0D-S[I|C]B0(T00) */
+ case 0xDC:
+ chip->ecc_step_ds = 512;
+ chip->ecc_strength_ds = 1;
+ break;
+
+ /* K9F1G08U0E 21nm chips do not support subpage write */
+ case 0xF1:
+ if (chip->id.len > 4 &&
+ (chip->id.data[4] & GENMASK(1, 0)) == 0x1)
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ break;
+ default:
+ break;
+ }
+ }
}
}
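The 0xF1 case packs the subpage-write capability into the two low bits of the fifth ID byte: GENMASK(1, 0) is 0x3, so the test selects IDs whose low two bits equal 0x1. A worked example with a hypothetical ID byte:

	u8 id4 = 0x5d;	/* hypothetical fifth ID byte */
	bool no_subpage = (id4 & GENMASK(1, 0)) == 0x1;	/* 0x5d & 0x3 == 0x1 */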
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
index 5d1533b..9400d03 100644
--- a/drivers/mtd/nand/nand_timings.c
+++ b/drivers/mtd/nand/nand_timings.c
@@ -283,16 +283,16 @@ const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode)
EXPORT_SYMBOL(onfi_async_timing_mode_to_sdr_timings);
/**
- * onfi_init_data_interface - [NAND Interface] Initialize a data interface from
+ * onfi_fill_data_interface - [NAND Interface] Initialize a data interface from
* given ONFI mode
- * @iface: The data interface to be initialized
* @mode: The ONFI timing mode
*/
-int onfi_init_data_interface(struct nand_chip *chip,
- struct nand_data_interface *iface,
+int onfi_fill_data_interface(struct nand_chip *chip,
enum nand_data_interface_type type,
int timing_mode)
{
+ struct nand_data_interface *iface = &chip->data_interface;
+
if (type != NAND_SDR_IFACE)
return -EINVAL;
@@ -321,15 +321,4 @@ int onfi_init_data_interface(struct nand_chip *chip,
return 0;
}
-EXPORT_SYMBOL(onfi_init_data_interface);
-
-/**
- * nand_get_default_data_interface - [NAND Interface] Retrieve NAND
- * data interface for mode 0. This is used as default timing after
- * reset.
- */
-const struct nand_data_interface *nand_get_default_data_interface(void)
-{
- return &onfi_sdr_timings[0];
-}
-EXPORT_SYMBOL(nand_get_default_data_interface);
+EXPORT_SYMBOL(onfi_fill_data_interface);
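Mode 0 is the slowest, universally supported SDR timing set, which is why nand_scan_ident() programs it before reset and detection (see the nand_base.c hunk above). A usage sketch:

	int ret;

	/* Conservative timings for the first reset/ID sequence. */
	ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
	if (ret)
		return ret;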
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index dad438c..8cdf7d3 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1530,7 +1530,9 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
int ret;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
/* Enable GPMC ecc engine */
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
@@ -1548,7 +1550,8 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
/* Write ecc vector to OOB area */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
/**
@@ -1568,7 +1571,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
u32 data_len, const u8 *buf,
int oob_required, int page)
{
- u8 *ecc_calc = chip->buffers->ecccalc;
+ u8 *ecc_calc = chip->ecc.calc_buf;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
int ecc_steps = chip->ecc.steps;
@@ -1582,6 +1585,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
* ECC is calculated for all subpages but we choose
* only what we want.
*/
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
/* Enable GPMC ECC engine */
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
@@ -1605,7 +1609,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
	/*
	 * Copy the calculated ECC for the whole page to chip->oob_poi; this
	 * includes the 0xFF mask values for unwritten subpages.
	 */
- ecc_calc = chip->buffers->ecccalc;
+ ecc_calc = chip->ecc.calc_buf;
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
@@ -1614,7 +1618,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
/* write OOB buffer to NAND device */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
/**
@@ -1635,11 +1639,13 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
int stat, ret;
unsigned int max_bitflips = 0;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
/* Enable GPMC ecc engine */
chip->ecc.hwctl(mtd, NAND_ECC_READ);
@@ -1647,10 +1653,10 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->read_buf(mtd, buf, mtd->writesize);
/* Read oob bytes */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + BADBLOCK_MARKER_LENGTH, -1);
- chip->read_buf(mtd, chip->oob_poi + BADBLOCK_MARKER_LENGTH,
- chip->ecc.total);
+ nand_change_read_column_op(chip,
+ mtd->writesize + BADBLOCK_MARKER_LENGTH,
+ chip->oob_poi + BADBLOCK_MARKER_LENGTH,
+ chip->ecc.total, false);
/* Calculate ecc bytes */
omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 90b9a9c..d1979c7 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -520,15 +520,13 @@ static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
struct nand_chip *chip = &host->chip;
struct pxa3xx_nand_info *info = host->info_data;
const struct pxa3xx_nand_flash *f = NULL;
- struct mtd_info *mtd = nand_to_mtd(&host->chip);
int i, id, ntypes;
+ u8 idbuf[2];
ntypes = ARRAY_SIZE(builtin_flash_types);
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
-
- id = chip->read_byte(mtd);
- id |= chip->read_byte(mtd) << 0x8;
+ nand_readid_op(chip, 0, idbuf, sizeof(idbuf));
+ id = idbuf[0] | (idbuf[1] << 8);
for (i = 0; i < ntypes; i++) {
f = &builtin_flash_types[i];
@@ -963,6 +961,7 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
switch (command) {
case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
case NAND_CMD_PAGEPROG:
info->use_ecc = 1;
break;
@@ -1350,10 +1349,10 @@ static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf, int oob_required,
int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
@@ -1363,7 +1362,7 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
struct pxa3xx_nand_info *info = host->info_data;
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
if (info->retcode == ERR_CORERR && info->use_ecc) {
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
index 2656c1a..6be5558 100644
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -1725,6 +1725,7 @@ static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
u8 *data_buf, *oob_buf = NULL;
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
data_buf = buf;
oob_buf = oob_required ? chip->oob_poi : NULL;
@@ -1750,6 +1751,7 @@ static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
int i, ret;
int read_loc;
+ nand_read_page_op(chip, page, 0, NULL, 0);
data_buf = buf;
oob_buf = chip->oob_poi;
@@ -1850,6 +1852,8 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
u8 *data_buf, *oob_buf;
int i, ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -1902,6 +1906,9 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
free_descs(nandc);
+ if (!ret)
+ ret = nand_prog_page_end_op(chip);
+
return ret;
}
@@ -1916,6 +1923,7 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
u8 *data_buf, *oob_buf;
int i, ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -1970,6 +1978,9 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
free_descs(nandc);
+ if (!ret)
+ ret = nand_prog_page_end_op(chip);
+
return ret;
}
@@ -1990,7 +2001,7 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
struct nand_ecc_ctrl *ecc = &chip->ecc;
u8 *oob = chip->oob_poi;
int data_size, oob_size;
- int ret, status = 0;
+ int ret;
host->use_ecc = true;
@@ -2027,11 +2038,7 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
return -EIO;
}
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
@@ -2081,7 +2088,7 @@ static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- int page, ret, status = 0;
+ int page, ret;
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -2114,11 +2121,7 @@ static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
return -EIO;
}
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
/*
@@ -2636,6 +2639,9 @@ static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
nand_set_flash_node(chip, dn);
mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
+ if (!mtd->name)
+ return -ENOMEM;
+
mtd->owner = THIS_MODULE;
mtd->dev.parent = dev;
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index fc9287a..595635b 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -364,7 +364,7 @@ static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
struct r852_device *dev = nand_get_controller_data(chip);
unsigned long timeout;
- int status;
+ u8 status;
timeout = jiffies + (chip->state == FL_ERASING ?
msecs_to_jiffies(400) : msecs_to_jiffies(20));
@@ -373,8 +373,7 @@ static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
if (chip->dev_ready(mtd))
break;
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- status = (int)chip->read_byte(mtd);
+ nand_status_op(chip, &status);
	/* Unfortunately, no way to send detailed error status... */
if (dev->dma_error) {
@@ -522,9 +521,7 @@ exit:
static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/*
@@ -1046,7 +1043,7 @@ static int r852_resume(struct device *device)
if (dev->card_registred) {
r852_engine_enable(dev);
dev->chip->select_chip(mtd, 0);
- dev->chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset_op(dev->chip);
dev->chip->select_chip(mtd, -1);
}
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 3c5008a..c4e7755 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -614,7 +614,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
return 0;
@@ -624,9 +624,9 @@ static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required,
int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
diff --git a/drivers/mtd/nand/sm_common.h b/drivers/mtd/nand/sm_common.h
index d3e028e..1581671 100644
--- a/drivers/mtd/nand/sm_common.h
+++ b/drivers/mtd/nand/sm_common.h
@@ -36,7 +36,7 @@ struct sm_oob {
#define SM_SMALL_OOB_SIZE 8
-extern int sm_register_device(struct mtd_info *mtd, int smartmedia);
+int sm_register_device(struct mtd_info *mtd, int smartmedia);
static inline int sm_sector_valid(struct sm_oob *oob)
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 82244be..f5a55c6 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -958,12 +958,12 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
int ret;
if (*cur_off != data_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
+ nand_change_read_column_op(nand, data_off, NULL, 0, false);
sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);
if (data_off + ecc->size != oob_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+ nand_change_read_column_op(nand, oob_off, NULL, 0, false);
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
@@ -991,16 +991,15 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
* Re-read the data with the randomizer disabled to identify
* bitflips in erased pages.
*/
- if (nand->options & NAND_NEED_SCRAMBLING) {
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
- nand->read_buf(mtd, data, ecc->size);
- } else {
+ if (nand->options & NAND_NEED_SCRAMBLING)
+ nand_change_read_column_op(nand, data_off, data,
+ ecc->size, false);
+ else
memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
ecc->size);
- }
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
- nand->read_buf(mtd, oob, ecc->bytes + 4);
+ nand_change_read_column_op(nand, oob_off, oob, ecc->bytes + 4,
+ false);
ret = nand_check_erased_ecc_chunk(data, ecc->size,
oob, ecc->bytes + 4,
@@ -1011,7 +1010,8 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
if (oob_required) {
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+ nand_change_read_column_op(nand, oob_off, NULL, 0,
+ false);
sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4,
true, page);
@@ -1038,8 +1038,8 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
return;
if (!cur_off || *cur_off != offset)
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- offset + mtd->writesize, -1);
+ nand_change_read_column_op(nand, mtd->writesize, NULL, 0,
+ false);
if (!randomize)
sunxi_nfc_read_buf(mtd, oob + offset, len);
@@ -1116,9 +1116,9 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
if (oob_required && !erased) {
/* TODO: use DMA to retrieve OOB */
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + oob_off, -1);
- nand->read_buf(mtd, oob, ecc->bytes + 4);
+ nand_change_read_column_op(nand,
+ mtd->writesize + oob_off,
+ oob, ecc->bytes + 4, false);
sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i,
!i, page);
@@ -1143,18 +1143,17 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
/*
* Re-read the data with the randomizer disabled to
* identify bitflips in erased pages.
+ * TODO: use DMA to read page in raw mode
*/
- if (randomized) {
- /* TODO: use DMA to read page in raw mode */
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- data_off, -1);
- nand->read_buf(mtd, data, ecc->size);
- }
+ if (randomized)
+ nand_change_read_column_op(nand, data_off,
+ data, ecc->size,
+ false);
/* TODO: use DMA to retrieve OOB */
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + oob_off, -1);
- nand->read_buf(mtd, oob, ecc->bytes + 4);
+ nand_change_read_column_op(nand,
+ mtd->writesize + oob_off,
+ oob, ecc->bytes + 4, false);
ret = nand_check_erased_ecc_chunk(data, ecc->size,
oob, ecc->bytes + 4,
@@ -1187,12 +1186,12 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
int ret;
if (data_off != *cur_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1);
+ nand_change_write_column_op(nand, data_off, NULL, 0, false);
sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);
if (data_off + ecc->size != oob_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1);
+ nand_change_write_column_op(nand, oob_off, NULL, 0, false);
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
@@ -1228,8 +1227,8 @@ static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
return;
if (!cur_off || *cur_off != offset)
- nand->cmdfunc(mtd, NAND_CMD_RNDIN,
- offset + mtd->writesize, -1);
+ nand_change_write_column_op(nand, offset + mtd->writesize,
+ NULL, 0, false);
sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
@@ -1246,6 +1245,8 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
int ret, i, cur_off = 0;
bool raw_mode = false;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1279,14 +1280,14 @@ static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd,
{
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page,
chip->ecc.steps);
if (ret >= 0)
return ret;
/* Fallback to PIO mode */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
-
return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page);
}
@@ -1299,6 +1300,8 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
int ret, i, cur_off = 0;
unsigned int max_bitflips = 0;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = data_offs / ecc->size;
@@ -1330,13 +1333,13 @@ static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd,
int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks);
if (ret >= 0)
return ret;
/* Fallback to PIO mode */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
-
return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen,
buf, page);
}
@@ -1349,6 +1352,8 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i, cur_off = 0;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1370,7 +1375,7 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_disable(mtd);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
@@ -1382,6 +1387,8 @@ static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i, cur_off = 0;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = data_offs / ecc->size;
@@ -1400,7 +1407,7 @@ static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_disable(mtd);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
@@ -1430,6 +1437,8 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page);
}
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
sunxi_nfc_randomizer_config(mtd, page, false);
sunxi_nfc_randomizer_enable(mtd);
@@ -1460,7 +1469,7 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
NULL, page);
- return 0;
+ return nand_prog_page_end_op(chip);
pio_fallback:
return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page);
@@ -1476,6 +1485,8 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
int ret, i, cur_off = 0;
bool raw_mode = false;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1512,6 +1523,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i, cur_off = 0;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1533,41 +1546,33 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_disable(mtd);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int sunxi_nfc_hw_common_ecc_read_oob(struct mtd_info *mtd,
struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
chip->pagebuf = -1;
- return chip->ecc.read_page(mtd, chip, chip->buffers->databuf, 1, page);
+ return chip->ecc.read_page(mtd, chip, chip->data_buf, 1, page);
}
static int sunxi_nfc_hw_common_ecc_write_oob(struct mtd_info *mtd,
struct nand_chip *chip,
int page)
{
- int ret, status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+ int ret;
chip->pagebuf = -1;
- memset(chip->buffers->databuf, 0xff, mtd->writesize);
- ret = chip->ecc.write_page(mtd, chip, chip->buffers->databuf, 1, page);
+ memset(chip->data_buf, 0xff, mtd->writesize);
+ ret = chip->ecc.write_page(mtd, chip, chip->data_buf, 1, page);
if (ret)
return ret;
/* Send command to program the OOB data */
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
static const s32 tWB_lut[] = {6, 12, 16, 20};
@@ -1853,8 +1858,14 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
/* Add ECC info retrieval from DT */
for (i = 0; i < ARRAY_SIZE(strengths); i++) {
- if (ecc->strength <= strengths[i])
+ if (ecc->strength <= strengths[i]) {
+ /*
+ * Update ecc->strength value with the actual strength
+ * that will be used by the ECC engine.
+ */
+ ecc->strength = strengths[i];
break;
+ }
}
if (i >= ARRAY_SIZE(strengths)) {
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
index 766906f..c5bee00b 100644
--- a/drivers/mtd/nand/tango_nand.c
+++ b/drivers/mtd/nand/tango_nand.c
@@ -329,7 +329,7 @@ static void aux_read(struct nand_chip *chip, u8 **buf, int len, int *pos)
if (!*buf) {
/* skip over "len" bytes */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, *pos, -1);
+ nand_change_read_column_op(chip, *pos, NULL, 0, false);
} else {
tango_read_buf(mtd, *buf, len);
*buf += len;
@@ -344,7 +344,7 @@ static void aux_write(struct nand_chip *chip, const u8 **buf, int len, int *pos)
if (!*buf) {
/* skip over "len" bytes */
- chip->cmdfunc(mtd, NAND_CMD_RNDIN, *pos, -1);
+ nand_change_write_column_op(chip, *pos, NULL, 0, false);
} else {
tango_write_buf(mtd, *buf, len);
*buf += len;
@@ -427,7 +427,7 @@ static void raw_write(struct nand_chip *chip, const u8 *buf, const u8 *oob)
static int tango_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
u8 *buf, int oob_required, int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
raw_read(chip, buf, chip->oob_poi);
return 0;
}
@@ -435,23 +435,15 @@ static int tango_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
static int tango_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const u8 *buf, int oob_required, int page)
{
- int status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
raw_write(chip, buf, chip->oob_poi);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
-
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int tango_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
raw_read(chip, NULL, chip->oob_poi);
return 0;
}
@@ -459,11 +451,9 @@ static int tango_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
static int tango_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
raw_write(chip, NULL, chip->oob_poi);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- chip->waitfunc(mtd, chip);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int oob_ecc(struct mtd_info *mtd, int idx, struct mtd_oob_region *res)
@@ -590,7 +580,6 @@ static int chip_init(struct device *dev, struct device_node *np)
ecc->write_page = tango_write_page;
ecc->read_oob = tango_read_oob;
ecc->write_oob = tango_write_oob;
- ecc->options = NAND_ECC_CUSTOM_PAGE_ACCESS;
err = nand_scan_tail(mtd);
if (err)
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 84dbf32..dcaa924 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -192,6 +192,7 @@ tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
{
struct tmio_nand *tmio = mtd_to_tmio(mtd);
long timeout;
+ u8 status;
/* enable RDYREQ interrupt */
tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
@@ -212,8 +213,8 @@ tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
dev_warn(&tmio->dev->dev, "timeout waiting for interrupt\n");
}
- nand_chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- return nand_chip->read_byte(mtd);
+ nand_status_op(nand_chip, &status);
+ return status;
}
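
tmio's ->waitfunc now fetches the status byte through nand_status_op() rather than issuing NAND_CMD_STATUS and calling ->read_byte() by hand. A minimal sketch of the new call (assumed semantics, inferred from the replaced lines):

    static int example_read_status(struct nand_chip *chip)
    {
            u8 status;
            int ret;

            /* Assumed: sends NAND_CMD_STATUS and reads one byte back. */
            ret = nand_status_op(chip, &status);
            if (ret)
                    return ret;

            return status & NAND_STATUS_FAIL ? -EIO : 0;
    }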
/*
diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
index 8037d4b..80d31a5 100644
--- a/drivers/mtd/nand/vf610_nfc.c
+++ b/drivers/mtd/nand/vf610_nfc.c
@@ -560,7 +560,7 @@ static int vf610_nfc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
int eccsize = chip->ecc.size;
int stat;
- vf610_nfc_read_buf(mtd, buf, eccsize);
+ nand_read_page_op(chip, page, 0, buf, eccsize);
if (oob_required)
vf610_nfc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -580,7 +580,7 @@ static int vf610_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
{
struct vf610_nfc *nfc = mtd_to_nfc(mtd);
- vf610_nfc_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
vf610_nfc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -588,7 +588,7 @@ static int vf610_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
nfc->use_hw_ecc = true;
nfc->write_sz = mtd->writesize + mtd->oobsize;
- return 0;
+ return nand_prog_page_end_op(chip);
}
static const struct of_device_id vf610_nfc_dt_ids[] = {
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index dcae2f6..9dc1574 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -4,8 +4,7 @@ menuconfig MTD_ONENAND
depends on HAS_IOMEM
help
	  This enables support for accessing all types of OneNAND flash
- devices. For further information see
- <http://www.samsung.com/Products/Semiconductor/OneNAND/index.htm>
+ devices.
if MTD_ONENAND
@@ -26,9 +25,11 @@ config MTD_ONENAND_GENERIC
config MTD_ONENAND_OMAP2
tristate "OneNAND on OMAP2/OMAP3 support"
depends on ARCH_OMAP2 || ARCH_OMAP3
+ depends on OF || COMPILE_TEST
help
- Support for a OneNAND flash device connected to an OMAP2/OMAP3 CPU
+ Support for a OneNAND flash device connected to an OMAP2/OMAP3 SoC
via the GPMC memory controller.
+ Enable dmaengine and gpiolib for better performance.
config MTD_ONENAND_SAMSUNG
tristate "OneNAND on Samsung SOC controller support"
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 24a1388..87c34f6 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -28,19 +28,18 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
+#include <linux/of_device.h>
+#include <linux/omap-gpmc.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <asm/mach/flash.h>
-#include <linux/platform_data/mtd-onenand-omap2.h>
-
-#include <linux/omap-dma.h>
#define DRIVER_NAME "omap2-onenand"
@@ -50,24 +49,17 @@ struct omap2_onenand {
struct platform_device *pdev;
int gpmc_cs;
unsigned long phys_base;
- unsigned int mem_size;
- int gpio_irq;
+ struct gpio_desc *int_gpiod;
struct mtd_info mtd;
struct onenand_chip onenand;
struct completion irq_done;
struct completion dma_done;
- int dma_channel;
- int freq;
- int (*setup)(void __iomem *base, int *freq_ptr);
- struct regulator *regulator;
- u8 flags;
+ struct dma_chan *dma_chan;
};
-static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
+static void omap2_onenand_dma_complete_func(void *completion)
{
- struct omap2_onenand *c = data;
-
- complete(&c->dma_done);
+ complete(completion);
}
static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
@@ -90,6 +82,65 @@ static inline void write_reg(struct omap2_onenand *c, unsigned short value,
writew(value, c->onenand.base + reg);
}
+static int omap2_onenand_set_cfg(struct omap2_onenand *c,
+ bool sr, bool sw,
+ int latency, int burst_len)
+{
+ unsigned short reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
+
+ reg |= latency << ONENAND_SYS_CFG1_BRL_SHIFT;
+
+ switch (burst_len) {
+ case 0: /* continuous */
+ break;
+ case 4:
+ reg |= ONENAND_SYS_CFG1_BL_4;
+ break;
+ case 8:
+ reg |= ONENAND_SYS_CFG1_BL_8;
+ break;
+ case 16:
+ reg |= ONENAND_SYS_CFG1_BL_16;
+ break;
+ case 32:
+ reg |= ONENAND_SYS_CFG1_BL_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (latency > 5)
+ reg |= ONENAND_SYS_CFG1_HF;
+ if (latency > 7)
+ reg |= ONENAND_SYS_CFG1_VHF;
+ if (sr)
+ reg |= ONENAND_SYS_CFG1_SYNC_READ;
+ if (sw)
+ reg |= ONENAND_SYS_CFG1_SYNC_WRITE;
+
+ write_reg(c, reg, ONENAND_REG_SYS_CFG1);
+
+ return 0;
+}
+
+static int omap2_onenand_get_freq(int ver)
+{
+ switch ((ver >> 4) & 0xf) {
+ case 0:
+ return 40;
+ case 1:
+ return 54;
+ case 2:
+ return 66;
+ case 3:
+ return 83;
+ case 4:
+ return 104;
+ }
+
+ return -EINVAL;
+}
+
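
omap2_onenand_set_cfg() packs the synchronous-interface parameters into SYS_CFG1 in one place. A worked example of the bit composition for a hypothetical 83 MHz part running with 16-word bursts and latency 6:

    /* Hypothetical call: sync read + sync write, latency 6, 16-word bursts. */
    r = omap2_onenand_set_cfg(c, true, true, 6, 16);
    /*
     * SYS_CFG1 = RDY | INT | (6 << BRL_SHIFT) | BL_16 | HF
     *            | SYNC_READ | SYNC_WRITE
     * (HF because latency > 5; no VHF because latency <= 7)
     */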
static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
@@ -153,28 +204,22 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
syscfg |= ONENAND_SYS_CFG1_IOBE;
write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
- if (c->flags & ONENAND_IN_OMAP34XX)
- /* Add a delay to let GPIO settle */
- syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
+ /* Add a delay to let GPIO settle */
+ syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
}
reinit_completion(&c->irq_done);
- if (c->gpio_irq) {
- result = gpio_get_value(c->gpio_irq);
- if (result == -1) {
- ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
- intr = read_reg(c, ONENAND_REG_INTERRUPT);
- wait_err("gpio error", state, ctrl, intr);
- return -EIO;
- }
- } else
- result = 0;
- if (result == 0) {
+ result = gpiod_get_value(c->int_gpiod);
+ if (result < 0) {
+ ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ wait_err("gpio error", state, ctrl, intr);
+ return result;
+ } else if (result == 0) {
int retry_cnt = 0;
retry:
- result = wait_for_completion_timeout(&c->irq_done,
- msecs_to_jiffies(20));
- if (result == 0) {
+ if (!wait_for_completion_io_timeout(&c->irq_done,
+ msecs_to_jiffies(20))) {
/* Timeout after 20ms */
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
if (ctrl & ONENAND_CTRL_ONGO &&
@@ -291,9 +336,42 @@ static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
return 0;
}
-#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)
+static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
+ dma_addr_t src, dma_addr_t dst,
+ size_t count)
+{
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+
+ tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0);
+ if (!tx) {
+ dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
+ return -EIO;
+ }
+
+ reinit_completion(&c->dma_done);
+
+ tx->callback = omap2_onenand_dma_complete_func;
+ tx->callback_param = &c->dma_done;
+
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(&c->pdev->dev, "Failed to do DMA tx_submit\n");
+ return -EIO;
+ }
+
+ dma_async_issue_pending(c->dma_chan);
+
+ if (!wait_for_completion_io_timeout(&c->dma_done,
+ msecs_to_jiffies(20))) {
+ dmaengine_terminate_sync(c->dma_chan);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
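
omap2_onenand_dma_transfer() is a textbook dmaengine memcpy: prepare a descriptor, attach a completion callback, submit it, kick the engine, then wait with a bounded timeout and terminate the channel on expiry. The channel itself is requested later in probe; a condensed sketch of that request side (standard dmaengine API, assuming any memcpy-capable channel will do):

    dma_cap_mask_t mask;
    struct dma_chan *chan;

    dma_cap_zero(mask);
    dma_cap_set(DMA_MEMCPY, mask);                  /* memcpy capability only */
    chan = dma_request_channel(mask, NULL, NULL);   /* NULL: no filter fn */
    /* A NULL chan is not fatal here: the driver simply stays on PIO. */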
-static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
+static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset,
size_t count)
{
@@ -301,10 +379,9 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
struct onenand_chip *this = mtd->priv;
dma_addr_t dma_src, dma_dst;
int bram_offset;
- unsigned long timeout;
void *buf = (void *)buffer;
size_t xtra;
- volatile unsigned *done;
+ int ret;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
@@ -341,25 +418,10 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
goto out_copy;
}
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
- count >> 2, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
-
- timeout = jiffies + msecs_to_jiffies(20);
- done = &c->dma_done.done;
- while (time_before(jiffies, timeout))
- if (*done)
- break;
-
+ ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
- if (!*done) {
+ if (ret) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
goto out_copy;
}
@@ -371,7 +433,7 @@ out_copy:
return 0;
}
-static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
+static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
const unsigned char *buffer,
int offset, size_t count)
{
@@ -379,9 +441,8 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
struct onenand_chip *this = mtd->priv;
dma_addr_t dma_src, dma_dst;
int bram_offset;
- unsigned long timeout;
void *buf = (void *)buffer;
- volatile unsigned *done;
+ int ret;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
@@ -412,25 +473,10 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
return -1;
}
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
- count >> 2, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
-
- timeout = jiffies + msecs_to_jiffies(20);
- done = &c->dma_done.done;
- while (time_before(jiffies, timeout))
- if (*done)
- break;
-
+ ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
- if (!*done) {
+ if (ret) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
goto out_copy;
}
@@ -442,136 +488,6 @@ out_copy:
return 0;
}
-#else
-
-static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
- unsigned char *buffer, int offset,
- size_t count)
-{
- return -ENOSYS;
-}
-
-static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
- const unsigned char *buffer,
- int offset, size_t count)
-{
- return -ENOSYS;
-}
-
-#endif
-
-#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)
-
-static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
- unsigned char *buffer, int offset,
- size_t count)
-{
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
- struct onenand_chip *this = mtd->priv;
- dma_addr_t dma_src, dma_dst;
- int bram_offset;
-
- bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
- /* DMA is not used. Revisit PM requirements before enabling it. */
- if (1 || (c->dma_channel < 0) ||
- ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
- (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
- memcpy(buffer, (__force void *)(this->base + bram_offset),
- count);
- return 0;
- }
-
- dma_src = c->phys_base + bram_offset;
- dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
- dev_err(&c->pdev->dev,
- "Couldn't DMA map a %d byte buffer\n",
- count);
- return -1;
- }
-
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
- count / 4, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
- wait_for_completion(&c->dma_done);
-
- dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
-
- return 0;
-}
-
-static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
- const unsigned char *buffer,
- int offset, size_t count)
-{
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
- struct onenand_chip *this = mtd->priv;
- dma_addr_t dma_src, dma_dst;
- int bram_offset;
-
- bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
- /* DMA is not used. Revisit PM requirements before enabling it. */
- if (1 || (c->dma_channel < 0) ||
- ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
- (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
- memcpy((__force void *)(this->base + bram_offset), buffer,
- count);
- return 0;
- }
-
- dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
- DMA_TO_DEVICE);
- dma_dst = c->phys_base + bram_offset;
- if (dma_mapping_error(&c->pdev->dev, dma_src)) {
- dev_err(&c->pdev->dev,
- "Couldn't DMA map a %d byte buffer\n",
- count);
- return -1;
- }
-
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
- count / 2, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
- wait_for_completion(&c->dma_done);
-
- dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
-
- return 0;
-}
-
-#else
-
-static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
- unsigned char *buffer, int offset,
- size_t count)
-{
- return -ENOSYS;
-}
-
-static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
- const unsigned char *buffer,
- int offset, size_t count)
-{
- return -ENOSYS;
-}
-
-#endif
-
-static struct platform_driver omap2_onenand_driver;
-
static void omap2_onenand_shutdown(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
@@ -583,168 +499,117 @@ static void omap2_onenand_shutdown(struct platform_device *pdev)
memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}
-static int omap2_onenand_enable(struct mtd_info *mtd)
-{
- int ret;
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
-
- ret = regulator_enable(c->regulator);
- if (ret != 0)
- dev_err(&c->pdev->dev, "can't enable regulator\n");
-
- return ret;
-}
-
-static int omap2_onenand_disable(struct mtd_info *mtd)
-{
- int ret;
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
-
- ret = regulator_disable(c->regulator);
- if (ret != 0)
- dev_err(&c->pdev->dev, "can't disable regulator\n");
-
- return ret;
-}
-
static int omap2_onenand_probe(struct platform_device *pdev)
{
- struct omap_onenand_platform_data *pdata;
- struct omap2_onenand *c;
- struct onenand_chip *this;
- int r;
+ u32 val;
+ dma_cap_mask_t mask;
+ int freq, latency, r;
struct resource *res;
+ struct omap2_onenand *c;
+ struct gpmc_onenand_info info;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "error getting memory resource\n");
+ return -EINVAL;
+ }
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata == NULL) {
- dev_err(&pdev->dev, "platform data missing\n");
- return -ENODEV;
+ r = of_property_read_u32(np, "reg", &val);
+ if (r) {
+ dev_err(dev, "reg not found in DT\n");
+ return r;
}
- c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
+ c = devm_kzalloc(dev, sizeof(struct omap2_onenand), GFP_KERNEL);
if (!c)
return -ENOMEM;
init_completion(&c->irq_done);
init_completion(&c->dma_done);
- c->flags = pdata->flags;
- c->gpmc_cs = pdata->cs;
- c->gpio_irq = pdata->gpio_irq;
- c->dma_channel = pdata->dma_channel;
- if (c->dma_channel < 0) {
- /* if -1, don't use DMA */
- c->gpio_irq = 0;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- r = -EINVAL;
- dev_err(&pdev->dev, "error getting memory resource\n");
- goto err_kfree;
- }
-
+ c->gpmc_cs = val;
c->phys_base = res->start;
- c->mem_size = resource_size(res);
-
- if (request_mem_region(c->phys_base, c->mem_size,
- pdev->dev.driver->name) == NULL) {
- dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, size: 0x%x\n",
- c->phys_base, c->mem_size);
- r = -EBUSY;
- goto err_kfree;
- }
- c->onenand.base = ioremap(c->phys_base, c->mem_size);
- if (c->onenand.base == NULL) {
- r = -ENOMEM;
- goto err_release_mem_region;
- }
- if (pdata->onenand_setup != NULL) {
- r = pdata->onenand_setup(c->onenand.base, &c->freq);
- if (r < 0) {
- dev_err(&pdev->dev, "Onenand platform setup failed: "
- "%d\n", r);
- goto err_iounmap;
- }
- c->setup = pdata->onenand_setup;
+ c->onenand.base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(c->onenand.base))
+ return PTR_ERR(c->onenand.base);
+
+ c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
+ if (IS_ERR(c->int_gpiod)) {
+ r = PTR_ERR(c->int_gpiod);
+		/* On -EPROBE_DEFER, the probe is simply retried later */

+ if (r != -EPROBE_DEFER)
+ dev_err(dev, "error getting gpio: %d\n", r);
+ return r;
}
- if (c->gpio_irq) {
- if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
- dev_err(&pdev->dev, "Failed to request GPIO%d for "
- "OneNAND\n", c->gpio_irq);
- goto err_iounmap;
- }
- gpio_direction_input(c->gpio_irq);
+ if (c->int_gpiod) {
+ r = devm_request_irq(dev, gpiod_to_irq(c->int_gpiod),
+ omap2_onenand_interrupt,
+ IRQF_TRIGGER_RISING, "onenand", c);
+ if (r)
+ return r;
- if ((r = request_irq(gpio_to_irq(c->gpio_irq),
- omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
- pdev->dev.driver->name, c)) < 0)
- goto err_release_gpio;
+ c->onenand.wait = omap2_onenand_wait;
}
- if (c->dma_channel >= 0) {
- r = omap_request_dma(0, pdev->dev.driver->name,
- omap2_onenand_dma_cb, (void *) c,
- &c->dma_channel);
- if (r == 0) {
- omap_set_dma_write_mode(c->dma_channel,
- OMAP_DMA_WRITE_NON_POSTED);
- omap_set_dma_src_data_pack(c->dma_channel, 1);
- omap_set_dma_src_burst_mode(c->dma_channel,
- OMAP_DMA_DATA_BURST_8);
- omap_set_dma_dest_data_pack(c->dma_channel, 1);
- omap_set_dma_dest_burst_mode(c->dma_channel,
- OMAP_DMA_DATA_BURST_8);
- } else {
- dev_info(&pdev->dev,
- "failed to allocate DMA for OneNAND, "
- "using PIO instead\n");
- c->dma_channel = -1;
- }
- }
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
- dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
- "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
- c->onenand.base, c->freq);
+ c->dma_chan = dma_request_channel(mask, NULL, NULL);
+ if (c->dma_chan) {
+ c->onenand.read_bufferram = omap2_onenand_read_bufferram;
+ c->onenand.write_bufferram = omap2_onenand_write_bufferram;
+ }
c->pdev = pdev;
c->mtd.priv = &c->onenand;
+ c->mtd.dev.parent = dev;
+ mtd_set_of_node(&c->mtd, dev->of_node);
- c->mtd.dev.parent = &pdev->dev;
- mtd_set_of_node(&c->mtd, pdata->of_node);
-
- this = &c->onenand;
- if (c->dma_channel >= 0) {
- this->wait = omap2_onenand_wait;
- if (c->flags & ONENAND_IN_OMAP34XX) {
- this->read_bufferram = omap3_onenand_read_bufferram;
- this->write_bufferram = omap3_onenand_write_bufferram;
- } else {
- this->read_bufferram = omap2_onenand_read_bufferram;
- this->write_bufferram = omap2_onenand_write_bufferram;
- }
- }
+ dev_info(dev, "initializing on CS%d (0x%08lx), va %p, %s mode\n",
+ c->gpmc_cs, c->phys_base, c->onenand.base,
+ c->dma_chan ? "DMA" : "PIO");
- if (pdata->regulator_can_sleep) {
- c->regulator = regulator_get(&pdev->dev, "vonenand");
- if (IS_ERR(c->regulator)) {
- dev_err(&pdev->dev, "Failed to get regulator\n");
- r = PTR_ERR(c->regulator);
- goto err_release_dma;
+ if ((r = onenand_scan(&c->mtd, 1)) < 0)
+ goto err_release_dma;
+
+ freq = omap2_onenand_get_freq(c->onenand.version_id);
+ if (freq > 0) {
+ switch (freq) {
+ case 104:
+ latency = 7;
+ break;
+ case 83:
+ latency = 6;
+ break;
+ case 66:
+ latency = 5;
+ break;
+ case 56:
+ latency = 4;
+ break;
+ default: /* 40 MHz or lower */
+ latency = 3;
+ break;
}
- c->onenand.enable = omap2_onenand_enable;
- c->onenand.disable = omap2_onenand_disable;
- }
- if (pdata->skip_initial_unlocking)
- this->options |= ONENAND_SKIP_INITIAL_UNLOCKING;
+ r = gpmc_omap_onenand_set_timings(dev, c->gpmc_cs,
+ freq, latency, &info);
+ if (r)
+ goto err_release_onenand;
- if ((r = onenand_scan(&c->mtd, 1)) < 0)
- goto err_release_regulator;
+ r = omap2_onenand_set_cfg(c, info.sync_read, info.sync_write,
+ latency, info.burst_len);
+ if (r)
+ goto err_release_onenand;
- r = mtd_device_register(&c->mtd, pdata ? pdata->parts : NULL,
- pdata ? pdata->nr_parts : 0);
+ if (info.sync_read || info.sync_write)
+ dev_info(dev, "optimized timings for %d MHz\n", freq);
+ }
+
+ r = mtd_device_register(&c->mtd, NULL, 0);
if (r)
goto err_release_onenand;
@@ -754,22 +619,9 @@ static int omap2_onenand_probe(struct platform_device *pdev)
err_release_onenand:
onenand_release(&c->mtd);
-err_release_regulator:
- regulator_put(c->regulator);
err_release_dma:
- if (c->dma_channel != -1)
- omap_free_dma(c->dma_channel);
- if (c->gpio_irq)
- free_irq(gpio_to_irq(c->gpio_irq), c);
-err_release_gpio:
- if (c->gpio_irq)
- gpio_free(c->gpio_irq);
-err_iounmap:
- iounmap(c->onenand.base);
-err_release_mem_region:
- release_mem_region(c->phys_base, c->mem_size);
-err_kfree:
- kfree(c);
+ if (c->dma_chan)
+ dma_release_channel(c->dma_chan);
return r;
}
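
Most of the deleted error labels disappear because the probe now uses devm_* managed resources, which the driver core releases automatically when probe fails or the device is unbound; only the DMA channel, which is not devm-managed, still needs an explicit release. A condensed sketch of the resulting pattern:

    static int example_probe(struct platform_device *pdev)
    {
            struct device *dev = &pdev->dev;
            struct resource *res;
            void __iomem *base;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(dev, res); /* unmapped on detach */
            if (IS_ERR(base))
                    return PTR_ERR(base);           /* no unwind needed */

            return 0;
    }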
@@ -779,27 +631,26 @@ static int omap2_onenand_remove(struct platform_device *pdev)
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
onenand_release(&c->mtd);
- regulator_put(c->regulator);
- if (c->dma_channel != -1)
- omap_free_dma(c->dma_channel);
+ if (c->dma_chan)
+ dma_release_channel(c->dma_chan);
omap2_onenand_shutdown(pdev);
- if (c->gpio_irq) {
- free_irq(gpio_to_irq(c->gpio_irq), c);
- gpio_free(c->gpio_irq);
- }
- iounmap(c->onenand.base);
- release_mem_region(c->phys_base, c->mem_size);
- kfree(c);
return 0;
}
+static const struct of_device_id omap2_onenand_id_table[] = {
+ { .compatible = "ti,omap2-onenand", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap2_onenand_id_table);
+
static struct platform_driver omap2_onenand_driver = {
.probe = omap2_onenand_probe,
.remove = omap2_onenand_remove,
.shutdown = omap2_onenand_shutdown,
.driver = {
.name = DRIVER_NAME,
+ .of_match_table = omap2_onenand_id_table,
},
};
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 1a6d0e3..979f403 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1383,15 +1383,6 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
- /* Do not allow reads past end of device */
- if (unlikely(from >= mtd->size ||
- column + len > ((mtd->size >> this->page_shift) -
- (from >> this->page_shift)) * oobsize)) {
- printk(KERN_ERR "%s: Attempted to read beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
stats = mtd->ecc_stats;
readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
@@ -1448,38 +1439,6 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
}
/**
- * onenand_read - [MTD Interface] Read data from flash
- * @param mtd MTD device structure
- * @param from offset to read from
- * @param len number of bytes to read
- * @param retlen pointer to variable to store the number of read bytes
- * @param buf the databuffer to put data
- *
- * Read with ecc
-*/
-static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct onenand_chip *this = mtd->priv;
- struct mtd_oob_ops ops = {
- .len = len,
- .ooblen = 0,
- .datbuf = buf,
- .oobbuf = NULL,
- };
- int ret;
-
- onenand_get_device(mtd, FL_READING);
- ret = ONENAND_IS_4KB_PAGE(this) ?
- onenand_mlc_read_ops_nolock(mtd, from, &ops) :
- onenand_read_ops_nolock(mtd, from, &ops);
- onenand_release_device(mtd);
-
- *retlen = ops.retlen;
- return ret;
-}
-
-/**
* onenand_read_oob - [MTD Interface] Read main and/or out-of-band
* @param mtd: MTD device structure
* @param from: offset to read from
@@ -2056,15 +2015,6 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
return -EINVAL;
}
- /* Do not allow reads past end of device */
- if (unlikely(to >= mtd->size ||
- column + len > ((mtd->size >> this->page_shift) -
- (to >> this->page_shift)) * oobsize)) {
- printk(KERN_ERR "%s: Attempted to write past end of device\n",
- __func__);
- return -EINVAL;
- }
-
oobbuf = this->oob_buf;
oobcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
@@ -2129,35 +2079,6 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
}
/**
- * onenand_write - [MTD Interface] write buffer to FLASH
- * @param mtd MTD device structure
- * @param to offset to write to
- * @param len number of bytes to write
- * @param retlen pointer to variable to store the number of written bytes
- * @param buf the data to write
- *
- * Write with ECC
- */
-static int onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct mtd_oob_ops ops = {
- .len = len,
- .ooblen = 0,
- .datbuf = (u_char *) buf,
- .oobbuf = NULL,
- };
- int ret;
-
- onenand_get_device(mtd, FL_WRITING);
- ret = onenand_write_ops_nolock(mtd, to, &ops);
- onenand_release_device(mtd);
-
- *retlen = ops.retlen;
- return ret;
-}
-
-/**
* onenand_write_oob - [MTD Interface] NAND write data and/or out-of-band
* @param mtd: MTD device structure
* @param to: offset to write
@@ -4038,8 +3959,6 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
mtd->_erase = onenand_erase;
mtd->_point = NULL;
mtd->_unpoint = NULL;
- mtd->_read = onenand_read;
- mtd->_write = onenand_write;
mtd->_read_oob = onenand_read_oob;
mtd->_write_oob = onenand_write_oob;
mtd->_panic_write = onenand_panic_write;
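
Dropping the onenand_read()/onenand_write() wrappers is safe because the MTD core falls back to the _read_oob/_write_oob handlers when _read/_write are NULL, wrapping the plain request in a struct mtd_oob_ops with only the data buffer set. A paraphrased sketch of that fallback (not a verbatim copy of mtdcore):

    /* Inside mtd_read(), roughly: */
    if (!mtd->_read) {
            struct mtd_oob_ops ops = {
                    .len    = len,
                    .datbuf = buf,  /* ooblen = 0, oobbuf = NULL: data only */
            };
            int ret = mtd_read_oob(mtd, from, &ops);

            *retlen = ops.retlen;
            return ret;
    }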
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index af0ac1a..2e9d076 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -25,8 +25,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <asm/mach/flash.h>
-
#include "samsung.h"
enum soc_type {
@@ -129,16 +127,13 @@ struct s3c_onenand {
struct platform_device *pdev;
enum soc_type type;
void __iomem *base;
- struct resource *base_res;
void __iomem *ahb_addr;
- struct resource *ahb_res;
int bootram_command;
- void __iomem *page_buf;
- void __iomem *oob_buf;
+ void *page_buf;
+ void *oob_buf;
unsigned int (*mem_addr)(int fba, int fpa, int fsa);
unsigned int (*cmd_map)(unsigned int type, unsigned int val);
void __iomem *dma_addr;
- struct resource *dma_res;
unsigned long phys_base;
struct completion complete;
};
@@ -413,8 +408,8 @@ static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
/*
* Emulate Two BufferRAMs and access with 4 bytes pointer
*/
- m = (unsigned int *) onenand->page_buf;
- s = (unsigned int *) onenand->oob_buf;
+ m = onenand->page_buf;
+ s = onenand->oob_buf;
if (index) {
m += (this->writesize >> 2);
@@ -486,11 +481,11 @@ static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
unsigned char *p;
if (area == ONENAND_DATARAM) {
- p = (unsigned char *) onenand->page_buf;
+ p = onenand->page_buf;
if (index == 1)
p += this->writesize;
} else {
- p = (unsigned char *) onenand->oob_buf;
+ p = onenand->oob_buf;
if (index == 1)
p += mtd->oobsize;
}
@@ -851,15 +846,14 @@ static int s3c_onenand_probe(struct platform_device *pdev)
	/* No need to check pdata. The platform data is optional. */
size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
- mtd = kzalloc(size, GFP_KERNEL);
+ mtd = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (!mtd)
return -ENOMEM;
- onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL);
- if (!onenand) {
- err = -ENOMEM;
- goto onenand_fail;
- }
+ onenand = devm_kzalloc(&pdev->dev, sizeof(struct s3c_onenand),
+ GFP_KERNEL);
+ if (!onenand)
+ return -ENOMEM;
this = (struct onenand_chip *) &mtd[1];
mtd->priv = this;
@@ -870,26 +864,12 @@ static int s3c_onenand_probe(struct platform_device *pdev)
s3c_onenand_setup(mtd);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no memory resource defined\n");
- return -ENOENT;
- goto ahb_resource_failed;
- }
+ onenand->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(onenand->base))
+ return PTR_ERR(onenand->base);
- onenand->base_res = request_mem_region(r->start, resource_size(r),
- pdev->name);
- if (!onenand->base_res) {
- dev_err(&pdev->dev, "failed to request memory resource\n");
- err = -EBUSY;
- goto resource_failed;
- }
+ onenand->phys_base = r->start;
- onenand->base = ioremap(r->start, resource_size(r));
- if (!onenand->base) {
- dev_err(&pdev->dev, "failed to map memory resource\n");
- err = -EFAULT;
- goto ioremap_failed;
- }
/* Set onenand_chip also */
this->base = onenand->base;
@@ -898,40 +878,20 @@ static int s3c_onenand_probe(struct platform_device *pdev)
if (onenand->type != TYPE_S5PC110) {
r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!r) {
- dev_err(&pdev->dev, "no buffer memory resource defined\n");
- err = -ENOENT;
- goto ahb_resource_failed;
- }
-
- onenand->ahb_res = request_mem_region(r->start, resource_size(r),
- pdev->name);
- if (!onenand->ahb_res) {
- dev_err(&pdev->dev, "failed to request buffer memory resource\n");
- err = -EBUSY;
- goto ahb_resource_failed;
- }
-
- onenand->ahb_addr = ioremap(r->start, resource_size(r));
- if (!onenand->ahb_addr) {
- dev_err(&pdev->dev, "failed to map buffer memory resource\n");
- err = -EINVAL;
- goto ahb_ioremap_failed;
- }
+ onenand->ahb_addr = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(onenand->ahb_addr))
+ return PTR_ERR(onenand->ahb_addr);
/* Allocate 4KiB BufferRAM */
- onenand->page_buf = kzalloc(SZ_4K, GFP_KERNEL);
- if (!onenand->page_buf) {
- err = -ENOMEM;
- goto page_buf_fail;
- }
+ onenand->page_buf = devm_kzalloc(&pdev->dev, SZ_4K,
+ GFP_KERNEL);
+ if (!onenand->page_buf)
+ return -ENOMEM;
		/* Allocate 128 bytes of SpareRAM */
- onenand->oob_buf = kzalloc(128, GFP_KERNEL);
- if (!onenand->oob_buf) {
- err = -ENOMEM;
- goto oob_buf_fail;
- }
+ onenand->oob_buf = devm_kzalloc(&pdev->dev, 128, GFP_KERNEL);
+ if (!onenand->oob_buf)
+ return -ENOMEM;
/* S3C doesn't handle subpage write */
mtd->subpage_sft = 0;
@@ -939,28 +899,9 @@ static int s3c_onenand_probe(struct platform_device *pdev)
} else { /* S5PC110 */
r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!r) {
- dev_err(&pdev->dev, "no dma memory resource defined\n");
- err = -ENOENT;
- goto dma_resource_failed;
- }
-
- onenand->dma_res = request_mem_region(r->start, resource_size(r),
- pdev->name);
- if (!onenand->dma_res) {
- dev_err(&pdev->dev, "failed to request dma memory resource\n");
- err = -EBUSY;
- goto dma_resource_failed;
- }
-
- onenand->dma_addr = ioremap(r->start, resource_size(r));
- if (!onenand->dma_addr) {
- dev_err(&pdev->dev, "failed to map dma memory resource\n");
- err = -EINVAL;
- goto dma_ioremap_failed;
- }
-
- onenand->phys_base = onenand->base_res->start;
+ onenand->dma_addr = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(onenand->dma_addr))
+ return PTR_ERR(onenand->dma_addr);
s5pc110_dma_ops = s5pc110_dma_poll;
/* Interrupt support */
@@ -968,19 +909,20 @@ static int s3c_onenand_probe(struct platform_device *pdev)
if (r) {
init_completion(&onenand->complete);
s5pc110_dma_ops = s5pc110_dma_irq;
- err = request_irq(r->start, s5pc110_onenand_irq,
- IRQF_SHARED, "onenand", &onenand);
+ err = devm_request_irq(&pdev->dev, r->start,
+ s5pc110_onenand_irq,
+ IRQF_SHARED, "onenand",
+ &onenand);
if (err) {
dev_err(&pdev->dev, "failed to get irq\n");
- goto scan_failed;
+ return err;
}
}
}
- if (onenand_scan(mtd, 1)) {
- err = -EFAULT;
- goto scan_failed;
- }
+ err = onenand_scan(mtd, 1);
+ if (err)
+ return err;
if (onenand->type != TYPE_S5PC110) {
/* S3C doesn't handle subpage write */
@@ -994,40 +936,15 @@ static int s3c_onenand_probe(struct platform_device *pdev)
err = mtd_device_parse_register(mtd, NULL, NULL,
pdata ? pdata->parts : NULL,
pdata ? pdata->nr_parts : 0);
+ if (err) {
+ dev_err(&pdev->dev, "failed to parse partitions and register the MTD device\n");
+ onenand_release(mtd);
+ return err;
+ }
platform_set_drvdata(pdev, mtd);
return 0;
-
-scan_failed:
- if (onenand->dma_addr)
- iounmap(onenand->dma_addr);
-dma_ioremap_failed:
- if (onenand->dma_res)
- release_mem_region(onenand->dma_res->start,
- resource_size(onenand->dma_res));
- kfree(onenand->oob_buf);
-oob_buf_fail:
- kfree(onenand->page_buf);
-page_buf_fail:
- if (onenand->ahb_addr)
- iounmap(onenand->ahb_addr);
-ahb_ioremap_failed:
- if (onenand->ahb_res)
- release_mem_region(onenand->ahb_res->start,
- resource_size(onenand->ahb_res));
-dma_resource_failed:
-ahb_resource_failed:
- iounmap(onenand->base);
-ioremap_failed:
- if (onenand->base_res)
- release_mem_region(onenand->base_res->start,
- resource_size(onenand->base_res));
-resource_failed:
- kfree(onenand);
-onenand_fail:
- kfree(mtd);
- return err;
}
static int s3c_onenand_remove(struct platform_device *pdev)
@@ -1035,25 +952,7 @@ static int s3c_onenand_remove(struct platform_device *pdev)
struct mtd_info *mtd = platform_get_drvdata(pdev);
onenand_release(mtd);
- if (onenand->ahb_addr)
- iounmap(onenand->ahb_addr);
- if (onenand->ahb_res)
- release_mem_region(onenand->ahb_res->start,
- resource_size(onenand->ahb_res));
- if (onenand->dma_addr)
- iounmap(onenand->dma_addr);
- if (onenand->dma_res)
- release_mem_region(onenand->dma_res->start,
- resource_size(onenand->dma_res));
-
- iounmap(onenand->base);
- release_mem_region(onenand->base_res->start,
- resource_size(onenand->base_res));
-
- kfree(onenand->oob_buf);
- kfree(onenand->page_buf);
- kfree(onenand);
- kfree(mtd);
+
return 0;
}
diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c
index 5fe0079..8893dc8 100644
--- a/drivers/mtd/parsers/sharpslpart.c
+++ b/drivers/mtd/parsers/sharpslpart.c
@@ -192,7 +192,7 @@ static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
/* create physical-logical table */
for (block_num = 0; block_num < phymax; block_num++) {
- block_adr = block_num * mtd->erasesize;
+ block_adr = (loff_t)block_num * mtd->erasesize;
if (mtd_block_isbad(mtd, block_adr))
continue;
@@ -219,7 +219,7 @@ exit:
return ret;
}
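
Both hunks in this parser add a (loff_t) cast before the multiply. Without it, block_num (an int) times mtd->erasesize (a 32-bit value) is computed in 32 bits and only then widened, so offsets past 4 GiB silently wrap. A worked example:

    int block_num = 40000;
    u32 erasesize = 128 * 1024;                     /* 128 KiB blocks */

    loff_t bad  = block_num * erasesize;            /* 32-bit multiply wraps */
    loff_t good = (loff_t)block_num * erasesize;    /* 64-bit multiply */

    /* good == 5242880000 (~4.9 GiB, the real offset)       */
    /* bad  ==  947912704 (~0.88 GiB, wrapped modulo 2^32)  */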
-void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl)
+static void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl)
{
kfree(ftl->log2phy);
}
@@ -244,7 +244,7 @@ static int sharpsl_nand_read_laddr(struct mtd_info *mtd,
return -EINVAL;
block_num = ftl->log2phy[log_num];
- block_adr = block_num * mtd->erasesize;
+ block_adr = (loff_t)block_num * mtd->erasesize;
block_ofs = mtd_mod_by_eb((u32)from, mtd);
err = mtd_read(mtd, block_adr + block_ofs, len, &retlen, buf);
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 75a2bc4..4b8e918 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -58,6 +58,7 @@ struct cqspi_flash_pdata {
u8 data_width;
u8 cs;
bool registered;
+ bool use_direct_mode;
};
struct cqspi_st {
@@ -68,6 +69,7 @@ struct cqspi_st {
void __iomem *iobase;
void __iomem *ahb_base;
+ resource_size_t ahb_size;
struct completion transfer_complete;
struct mutex bus_mutex;
@@ -103,6 +105,7 @@ struct cqspi_st {
/* Register map */
#define CQSPI_REG_CONFIG 0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0)
+#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
#define CQSPI_REG_CONFIG_DMA_MASK BIT(15)
@@ -450,8 +453,7 @@ static int cqspi_command_write_addr(struct spi_nor *nor,
return cqspi_exec_flash_cmd(cqspi, reg);
}
-static int cqspi_indirect_read_setup(struct spi_nor *nor,
- const unsigned int from_addr)
+static int cqspi_read_setup(struct spi_nor *nor)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
@@ -459,8 +461,6 @@ static int cqspi_indirect_read_setup(struct spi_nor *nor,
unsigned int dummy_clk = 0;
unsigned int reg;
- writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
-
reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
reg |= cqspi_calc_rdreg(nor, nor->read_opcode);
@@ -493,8 +493,8 @@ static int cqspi_indirect_read_setup(struct spi_nor *nor,
return 0;
}
-static int cqspi_indirect_read_execute(struct spi_nor *nor,
- u8 *rxbuf, const unsigned n_rx)
+static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
+ loff_t from_addr, const size_t n_rx)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
@@ -504,6 +504,7 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor,
unsigned int bytes_to_read = 0;
int ret = 0;
+ writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
/* Clear all interrupts. */
@@ -570,8 +571,7 @@ failrd:
return ret;
}
-static int cqspi_indirect_write_setup(struct spi_nor *nor,
- const unsigned int to_addr)
+static int cqspi_write_setup(struct spi_nor *nor)
{
unsigned int reg;
struct cqspi_flash_pdata *f_pdata = nor->priv;
@@ -584,8 +584,6 @@ static int cqspi_indirect_write_setup(struct spi_nor *nor,
reg = cqspi_calc_rdreg(nor, nor->program_opcode);
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
- writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
-
reg = readl(reg_base + CQSPI_REG_SIZE);
reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
reg |= (nor->addr_width - 1);
@@ -593,8 +591,8 @@ static int cqspi_indirect_write_setup(struct spi_nor *nor,
return 0;
}
-static int cqspi_indirect_write_execute(struct spi_nor *nor,
- const u8 *txbuf, const unsigned n_tx)
+static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
+ const u8 *txbuf, const size_t n_tx)
{
const unsigned int page_size = nor->page_size;
struct cqspi_flash_pdata *f_pdata = nor->priv;
@@ -604,6 +602,7 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor,
unsigned int write_bytes;
int ret;
+ writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);
/* Clear all interrupts. */
@@ -894,17 +893,22 @@ static int cqspi_set_protocol(struct spi_nor *nor, const int read)
static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
size_t len, const u_char *buf)
{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
int ret;
ret = cqspi_set_protocol(nor, 0);
if (ret)
return ret;
- ret = cqspi_indirect_write_setup(nor, to);
+ ret = cqspi_write_setup(nor);
if (ret)
return ret;
- ret = cqspi_indirect_write_execute(nor, buf, len);
+ if (f_pdata->use_direct_mode)
+ memcpy_toio(cqspi->ahb_base + to, buf, len);
+ else
+ ret = cqspi_indirect_write_execute(nor, to, buf, len);
if (ret)
return ret;
@@ -914,17 +918,22 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
size_t len, u_char *buf)
{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
int ret;
ret = cqspi_set_protocol(nor, 1);
if (ret)
return ret;
- ret = cqspi_indirect_read_setup(nor, from);
+ ret = cqspi_read_setup(nor);
if (ret)
return ret;
- ret = cqspi_indirect_read_execute(nor, buf, len);
+ if (f_pdata->use_direct_mode)
+ memcpy_fromio(buf, cqspi->ahb_base + from, len);
+ else
+ ret = cqspi_indirect_read_execute(nor, buf, from, len);
if (ret)
return ret;
@@ -1059,6 +1068,8 @@ static int cqspi_of_get_pdata(struct platform_device *pdev)
static void cqspi_controller_init(struct cqspi_st *cqspi)
{
+ u32 reg;
+
cqspi_controller_enable(cqspi, 0);
/* Configure the remap address register, no remap */
@@ -1081,6 +1092,11 @@ static void cqspi_controller_init(struct cqspi_st *cqspi)
writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
+ /* Enable Direct Access Controller */
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
cqspi_controller_enable(cqspi, 1);
}
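
Setting ENB_DIR_ACC_CTRL lets accesses within the controller's AHB window go out as plain memory-mapped reads and writes instead of through the indirect FIFO engine. Whether a given flash can use direct mode is decided once at setup time and then dispatched per transfer, as in the cqspi_read()/cqspi_write() hunks above; condensed:

    /* At setup: direct mode only if the whole flash fits the AHB window. */
    if (mtd->size <= cqspi->ahb_size)
            f_pdata->use_direct_mode = true;

    /* Per read: */
    if (f_pdata->use_direct_mode)
            memcpy_fromio(buf, cqspi->ahb_base + from, len);
    else
            ret = cqspi_indirect_read_execute(nor, buf, from, len);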
@@ -1156,6 +1172,12 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
goto err;
f_pdata->registered = true;
+
+ if (mtd->size <= cqspi->ahb_size) {
+ f_pdata->use_direct_mode = true;
+ dev_dbg(nor->dev, "using direct mode for %s\n",
+ mtd->name);
+ }
}
return 0;
@@ -1215,6 +1237,7 @@ static int cqspi_probe(struct platform_device *pdev)
dev_err(dev, "Cannot remap AHB address.\n");
return PTR_ERR(cqspi->ahb_base);
}
+ cqspi->ahb_size = resource_size(res_ahb);
init_completion(&cqspi->transfer_complete);
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index f17d224..2901c7b 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -801,10 +801,10 @@ static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
}
static const struct of_device_id fsl_qspi_dt_ids[] = {
- { .compatible = "fsl,vf610-qspi", .data = (void *)&vybrid_data, },
- { .compatible = "fsl,imx6sx-qspi", .data = (void *)&imx6sx_data, },
- { .compatible = "fsl,imx7d-qspi", .data = (void *)&imx7d_data, },
- { .compatible = "fsl,imx6ul-qspi", .data = (void *)&imx6ul_data, },
+ { .compatible = "fsl,vf610-qspi", .data = &vybrid_data, },
+ { .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, },
+ { .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, },
+ { .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, },
{ .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, },
{ /* sentinel */ }
};
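
The (void *) casts can be dropped because of_device_id.data is declared const void *, so a pointer to a const structure assigns to it directly (note the ls1021a entry above still carries its now-redundant cast). On the retrieval side the usual pattern is of_device_get_match_data(); a sketch (the struct name follows this driver's naming and is an assumption):

    const struct fsl_qspi_devtype_data *data;

    data = of_device_get_match_data(&pdev->dev);    /* matched entry's .data */
    if (!data)
            return -ENODEV;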
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index ef034d8..6999515 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -138,7 +138,6 @@
* @erase_64k: 64k erase supported
 * @opcodes: Opcodes which are supported. These are programmed by BIOS
* before it locks down the controller.
- * @preopcodes: Preopcodes which are supported.
*/
struct intel_spi {
struct device *dev;
@@ -155,7 +154,6 @@ struct intel_spi {
bool swseq_erase;
bool erase_64k;
u8 opcodes[8];
- u8 preopcodes[2];
};
static bool writeable;
@@ -400,10 +398,6 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->opcodes[i] = opmenu0 >> i * 8;
ispi->opcodes[i + 4] = opmenu1 >> i * 8;
}
-
- val = readl(ispi->sregs + PREOP_OPTYPE);
- ispi->preopcodes[0] = val;
- ispi->preopcodes[1] = val >> 8;
}
}
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index abe455c..5442993 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -110,7 +110,7 @@
#define MTK_NOR_PRG_REG(n) (MTK_NOR_PRGDATA0_REG + 4 * (n))
#define MTK_NOR_SHREG(n) (MTK_NOR_SHREG0_REG + 4 * (n))
-struct mt8173_nor {
+struct mtk_nor {
struct spi_nor nor;
struct device *dev;
void __iomem *base; /* nor flash base address */
@@ -118,48 +118,48 @@ struct mt8173_nor {
struct clk *nor_clk;
};
-static void mt8173_nor_set_read_mode(struct mt8173_nor *mt8173_nor)
+static void mtk_nor_set_read_mode(struct mtk_nor *mtk_nor)
{
- struct spi_nor *nor = &mt8173_nor->nor;
+ struct spi_nor *nor = &mtk_nor->nor;
switch (nor->read_proto) {
case SNOR_PROTO_1_1_1:
- writeb(nor->read_opcode, mt8173_nor->base +
+ writeb(nor->read_opcode, mtk_nor->base +
MTK_NOR_PRGDATA3_REG);
- writeb(MTK_NOR_FAST_READ, mt8173_nor->base +
+ writeb(MTK_NOR_FAST_READ, mtk_nor->base +
MTK_NOR_CFG1_REG);
break;
case SNOR_PROTO_1_1_2:
- writeb(nor->read_opcode, mt8173_nor->base +
+ writeb(nor->read_opcode, mtk_nor->base +
MTK_NOR_PRGDATA3_REG);
- writeb(MTK_NOR_DUAL_READ_EN, mt8173_nor->base +
+ writeb(MTK_NOR_DUAL_READ_EN, mtk_nor->base +
MTK_NOR_DUAL_REG);
break;
case SNOR_PROTO_1_1_4:
- writeb(nor->read_opcode, mt8173_nor->base +
+ writeb(nor->read_opcode, mtk_nor->base +
MTK_NOR_PRGDATA4_REG);
- writeb(MTK_NOR_QUAD_READ_EN, mt8173_nor->base +
+ writeb(MTK_NOR_QUAD_READ_EN, mtk_nor->base +
MTK_NOR_DUAL_REG);
break;
default:
- writeb(MTK_NOR_DUAL_DISABLE, mt8173_nor->base +
+ writeb(MTK_NOR_DUAL_DISABLE, mtk_nor->base +
MTK_NOR_DUAL_REG);
break;
}
}
-static int mt8173_nor_execute_cmd(struct mt8173_nor *mt8173_nor, u8 cmdval)
+static int mtk_nor_execute_cmd(struct mtk_nor *mtk_nor, u8 cmdval)
{
int reg;
u8 val = cmdval & 0x1f;
- writeb(cmdval, mt8173_nor->base + MTK_NOR_CMD_REG);
- return readl_poll_timeout(mt8173_nor->base + MTK_NOR_CMD_REG, reg,
+ writeb(cmdval, mtk_nor->base + MTK_NOR_CMD_REG);
+ return readl_poll_timeout(mtk_nor->base + MTK_NOR_CMD_REG, reg,
!(reg & val), 100, 10000);
}
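
mtk_nor_execute_cmd() writes the command byte and then uses readl_poll_timeout() from <linux/iopoll.h> to wait until the hardware clears the command bits, instead of an open-coded busy loop. The helper's shape, with hypothetical register and bit names:

    #include <linux/iopoll.h>

    u32 val;
    int ret;

    /* Re-check every 100 us; give up after 10 ms with -ETIMEDOUT. */
    ret = readl_poll_timeout(base + EXAMPLE_STATUS_REG, val,
                             !(val & BIT(0)), 100, 10000);
    if (ret)
            return ret;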
-static int mt8173_nor_do_tx_rx(struct mt8173_nor *mt8173_nor, u8 op,
- u8 *tx, int txlen, u8 *rx, int rxlen)
+static int mtk_nor_do_tx_rx(struct mtk_nor *mtk_nor, u8 op,
+ u8 *tx, int txlen, u8 *rx, int rxlen)
{
int len = 1 + txlen + rxlen;
int i, ret, idx;
@@ -167,26 +167,26 @@ static int mt8173_nor_do_tx_rx(struct mt8173_nor *mt8173_nor, u8 op,
if (len > MTK_NOR_MAX_SHIFT)
return -EINVAL;
- writeb(len * 8, mt8173_nor->base + MTK_NOR_CNT_REG);
+ writeb(len * 8, mtk_nor->base + MTK_NOR_CNT_REG);
/* start at PRGDATA5, go down to PRGDATA0 */
idx = MTK_NOR_MAX_RX_TX_SHIFT - 1;
/* opcode */
- writeb(op, mt8173_nor->base + MTK_NOR_PRG_REG(idx));
+ writeb(op, mtk_nor->base + MTK_NOR_PRG_REG(idx));
idx--;
/* program TX data */
for (i = 0; i < txlen; i++, idx--)
- writeb(tx[i], mt8173_nor->base + MTK_NOR_PRG_REG(idx));
+ writeb(tx[i], mtk_nor->base + MTK_NOR_PRG_REG(idx));
/* clear out rest of TX registers */
while (idx >= 0) {
- writeb(0, mt8173_nor->base + MTK_NOR_PRG_REG(idx));
+ writeb(0, mtk_nor->base + MTK_NOR_PRG_REG(idx));
idx--;
}
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PRG_CMD);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PRG_CMD);
if (ret)
return ret;
@@ -195,20 +195,20 @@ static int mt8173_nor_do_tx_rx(struct mt8173_nor *mt8173_nor, u8 op,
/* read out RX data */
for (i = 0; i < rxlen; i++, idx--)
- rx[i] = readb(mt8173_nor->base + MTK_NOR_SHREG(idx));
+ rx[i] = readb(mtk_nor->base + MTK_NOR_SHREG(idx));
return 0;
}
/* Do a WRSR (Write Status Register) command */
-static int mt8173_nor_wr_sr(struct mt8173_nor *mt8173_nor, u8 sr)
+static int mtk_nor_wr_sr(struct mtk_nor *mtk_nor, u8 sr)
{
- writeb(sr, mt8173_nor->base + MTK_NOR_PRGDATA5_REG);
- writeb(8, mt8173_nor->base + MTK_NOR_CNT_REG);
- return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WRSR_CMD);
+ writeb(sr, mtk_nor->base + MTK_NOR_PRGDATA5_REG);
+ writeb(8, mtk_nor->base + MTK_NOR_CNT_REG);
+ return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WRSR_CMD);
}
-static int mt8173_nor_write_buffer_enable(struct mt8173_nor *mt8173_nor)
+static int mtk_nor_write_buffer_enable(struct mtk_nor *mtk_nor)
{
u8 reg;
@@ -216,27 +216,27 @@ static int mt8173_nor_write_buffer_enable(struct mt8173_nor *mt8173_nor)
* 0: pre-fetch buffer use for read
* 1: pre-fetch buffer use for page program
*/
- writel(MTK_NOR_WR_BUF_ENABLE, mt8173_nor->base + MTK_NOR_CFG2_REG);
- return readb_poll_timeout(mt8173_nor->base + MTK_NOR_CFG2_REG, reg,
+ writel(MTK_NOR_WR_BUF_ENABLE, mtk_nor->base + MTK_NOR_CFG2_REG);
+ return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg,
0x01 == (reg & 0x01), 100, 10000);
}
-static int mt8173_nor_write_buffer_disable(struct mt8173_nor *mt8173_nor)
+static int mtk_nor_write_buffer_disable(struct mtk_nor *mtk_nor)
{
u8 reg;
- writel(MTK_NOR_WR_BUF_DISABLE, mt8173_nor->base + MTK_NOR_CFG2_REG);
- return readb_poll_timeout(mt8173_nor->base + MTK_NOR_CFG2_REG, reg,
+ writel(MTK_NOR_WR_BUF_DISABLE, mtk_nor->base + MTK_NOR_CFG2_REG);
+ return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg,
MTK_NOR_WR_BUF_DISABLE == (reg & 0x1), 100,
10000);
}
-static void mt8173_nor_set_addr_width(struct mt8173_nor *mt8173_nor)
+static void mtk_nor_set_addr_width(struct mtk_nor *mtk_nor)
{
u8 val;
- struct spi_nor *nor = &mt8173_nor->nor;
+ struct spi_nor *nor = &mtk_nor->nor;
- val = readb(mt8173_nor->base + MTK_NOR_DUAL_REG);
+ val = readb(mtk_nor->base + MTK_NOR_DUAL_REG);
switch (nor->addr_width) {
case 3:
@@ -246,115 +246,115 @@ static void mt8173_nor_set_addr_width(struct mt8173_nor *mt8173_nor)
val |= MTK_NOR_4B_ADDR_EN;
break;
default:
- dev_warn(mt8173_nor->dev, "Unexpected address width %u.\n",
+ dev_warn(mtk_nor->dev, "Unexpected address width %u.\n",
nor->addr_width);
break;
}
- writeb(val, mt8173_nor->base + MTK_NOR_DUAL_REG);
+ writeb(val, mtk_nor->base + MTK_NOR_DUAL_REG);
}
-static void mt8173_nor_set_addr(struct mt8173_nor *mt8173_nor, u32 addr)
+static void mtk_nor_set_addr(struct mtk_nor *mtk_nor, u32 addr)
{
int i;
- mt8173_nor_set_addr_width(mt8173_nor);
+ mtk_nor_set_addr_width(mtk_nor);
for (i = 0; i < 3; i++) {
- writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR0_REG + i * 4);
+ writeb(addr & 0xff, mtk_nor->base + MTK_NOR_RADR0_REG + i * 4);
addr >>= 8;
}
/* Last register is non-contiguous */
- writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR3_REG);
+ writeb(addr & 0xff, mtk_nor->base + MTK_NOR_RADR3_REG);
}
-static ssize_t mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length,
- u_char *buffer)
+static ssize_t mtk_nor_read(struct spi_nor *nor, loff_t from, size_t length,
+ u_char *buffer)
{
int i, ret;
int addr = (int)from;
u8 *buf = (u8 *)buffer;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
	/* set mode for fast read, dual read or quad read */
- mt8173_nor_set_read_mode(mt8173_nor);
- mt8173_nor_set_addr(mt8173_nor, addr);
+ mtk_nor_set_read_mode(mtk_nor);
+ mtk_nor_set_addr(mtk_nor, addr);
for (i = 0; i < length; i++) {
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_READ_CMD);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_READ_CMD);
if (ret < 0)
return ret;
- buf[i] = readb(mt8173_nor->base + MTK_NOR_RDATA_REG);
+ buf[i] = readb(mtk_nor->base + MTK_NOR_RDATA_REG);
}
return length;
}
-static int mt8173_nor_write_single_byte(struct mt8173_nor *mt8173_nor,
- int addr, int length, u8 *data)
+static int mtk_nor_write_single_byte(struct mtk_nor *mtk_nor,
+ int addr, int length, u8 *data)
{
int i, ret;
- mt8173_nor_set_addr(mt8173_nor, addr);
+ mtk_nor_set_addr(mtk_nor, addr);
for (i = 0; i < length; i++) {
- writeb(*data++, mt8173_nor->base + MTK_NOR_WDATA_REG);
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_WR_CMD);
+ writeb(*data++, mtk_nor->base + MTK_NOR_WDATA_REG);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_WR_CMD);
if (ret < 0)
return ret;
}
return 0;
}
-static int mt8173_nor_write_buffer(struct mt8173_nor *mt8173_nor, int addr,
- const u8 *buf)
+static int mtk_nor_write_buffer(struct mtk_nor *mtk_nor, int addr,
+ const u8 *buf)
{
int i, bufidx, data;
- mt8173_nor_set_addr(mt8173_nor, addr);
+ mtk_nor_set_addr(mtk_nor, addr);
bufidx = 0;
for (i = 0; i < SFLASH_WRBUF_SIZE; i += 4) {
data = buf[bufidx + 3]<<24 | buf[bufidx + 2]<<16 |
buf[bufidx + 1]<<8 | buf[bufidx];
bufidx += 4;
- writel(data, mt8173_nor->base + MTK_NOR_PP_DATA_REG);
+ writel(data, mtk_nor->base + MTK_NOR_PP_DATA_REG);
}
- return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WR_CMD);
+ return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WR_CMD);
}
-static ssize_t mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
- const u_char *buf)
+static ssize_t mtk_nor_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *buf)
{
int ret;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
size_t i;
- ret = mt8173_nor_write_buffer_enable(mt8173_nor);
+ ret = mtk_nor_write_buffer_enable(mtk_nor);
if (ret < 0) {
- dev_warn(mt8173_nor->dev, "write buffer enable failed!\n");
+ dev_warn(mtk_nor->dev, "write buffer enable failed!\n");
return ret;
}
for (i = 0; i + SFLASH_WRBUF_SIZE <= len; i += SFLASH_WRBUF_SIZE) {
- ret = mt8173_nor_write_buffer(mt8173_nor, to, buf);
+ ret = mtk_nor_write_buffer(mtk_nor, to, buf);
if (ret < 0) {
- dev_err(mt8173_nor->dev, "write buffer failed!\n");
+ dev_err(mtk_nor->dev, "write buffer failed!\n");
return ret;
}
to += SFLASH_WRBUF_SIZE;
buf += SFLASH_WRBUF_SIZE;
}
- ret = mt8173_nor_write_buffer_disable(mt8173_nor);
+ ret = mtk_nor_write_buffer_disable(mtk_nor);
if (ret < 0) {
- dev_warn(mt8173_nor->dev, "write buffer disable failed!\n");
+ dev_warn(mtk_nor->dev, "write buffer disable failed!\n");
return ret;
}
if (i < len) {
- ret = mt8173_nor_write_single_byte(mt8173_nor, to,
- (int)(len - i), (u8 *)buf);
+ ret = mtk_nor_write_single_byte(mtk_nor, to,
+ (int)(len - i), (u8 *)buf);
if (ret < 0) {
- dev_err(mt8173_nor->dev, "write single byte failed!\n");
+ dev_err(mtk_nor->dev, "write single byte failed!\n");
return ret;
}
}
@@ -362,72 +362,72 @@ static ssize_t mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
return len;
}
-static int mt8173_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
int ret;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
switch (opcode) {
case SPINOR_OP_RDSR:
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_RDSR_CMD);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_RDSR_CMD);
if (ret < 0)
return ret;
if (len == 1)
- *buf = readb(mt8173_nor->base + MTK_NOR_RDSR_REG);
+ *buf = readb(mtk_nor->base + MTK_NOR_RDSR_REG);
else
- dev_err(mt8173_nor->dev, "len should be 1 for read status!\n");
+ dev_err(mtk_nor->dev, "len should be 1 for read status!\n");
break;
default:
- ret = mt8173_nor_do_tx_rx(mt8173_nor, opcode, NULL, 0, buf, len);
+ ret = mtk_nor_do_tx_rx(mtk_nor, opcode, NULL, 0, buf, len);
break;
}
return ret;
}
-static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
- int len)
+static int mtk_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ int len)
{
int ret;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
switch (opcode) {
case SPINOR_OP_WRSR:
/* We only handle 1 byte */
- ret = mt8173_nor_wr_sr(mt8173_nor, *buf);
+ ret = mtk_nor_wr_sr(mtk_nor, *buf);
break;
default:
- ret = mt8173_nor_do_tx_rx(mt8173_nor, opcode, buf, len, NULL, 0);
+ ret = mtk_nor_do_tx_rx(mtk_nor, opcode, buf, len, NULL, 0);
if (ret)
- dev_warn(mt8173_nor->dev, "write reg failure!\n");
+ dev_warn(mtk_nor->dev, "write reg failure!\n");
break;
}
return ret;
}
-static void mt8173_nor_disable_clk(struct mt8173_nor *mt8173_nor)
+static void mtk_nor_disable_clk(struct mtk_nor *mtk_nor)
{
- clk_disable_unprepare(mt8173_nor->spi_clk);
- clk_disable_unprepare(mt8173_nor->nor_clk);
+ clk_disable_unprepare(mtk_nor->spi_clk);
+ clk_disable_unprepare(mtk_nor->nor_clk);
}
-static int mt8173_nor_enable_clk(struct mt8173_nor *mt8173_nor)
+static int mtk_nor_enable_clk(struct mtk_nor *mtk_nor)
{
int ret;
- ret = clk_prepare_enable(mt8173_nor->spi_clk);
+ ret = clk_prepare_enable(mtk_nor->spi_clk);
if (ret)
return ret;
- ret = clk_prepare_enable(mt8173_nor->nor_clk);
+ ret = clk_prepare_enable(mtk_nor->nor_clk);
if (ret) {
- clk_disable_unprepare(mt8173_nor->spi_clk);
+ clk_disable_unprepare(mtk_nor->spi_clk);
return ret;
}
return 0;
}
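
The enable path above prepares the SPI clock before the controller clock and unwinds the first when the second fails. A minimal user-space sketch of the same acquire-in-order, release-in-reverse shape; acquire()/release() are hypothetical stand-ins for clk_prepare_enable()/clk_disable_unprepare():

#include <stdio.h>

static int acquire(const char *name, int fail)
{
	if (fail) {
		fprintf(stderr, "%s: failed\n", name);
		return -1;
	}
	printf("%s: enabled\n", name);
	return 0;
}

static void release(const char *name)
{
	printf("%s: disabled\n", name);
}

static int enable_both(int fail_second)
{
	int ret;

	ret = acquire("spi_clk", 0);
	if (ret)
		return ret;

	ret = acquire("nor_clk", fail_second);
	if (ret) {
		release("spi_clk");	/* unwind in reverse order */
		return ret;
	}
	return 0;
}

int main(void)
{
	return enable_both(0);
}
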
-static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
+static int mtk_nor_init(struct mtk_nor *mtk_nor,
struct device_node *flash_node)
{
const struct spi_nor_hwcaps hwcaps = {
@@ -439,18 +439,18 @@ static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
struct spi_nor *nor;
/* initialize controller to accept commands */
- writel(MTK_NOR_ENABLE_SF_CMD, mt8173_nor->base + MTK_NOR_WRPROT_REG);
+ writel(MTK_NOR_ENABLE_SF_CMD, mtk_nor->base + MTK_NOR_WRPROT_REG);
- nor = &mt8173_nor->nor;
- nor->dev = mt8173_nor->dev;
- nor->priv = mt8173_nor;
+ nor = &mtk_nor->nor;
+ nor->dev = mtk_nor->dev;
+ nor->priv = mtk_nor;
spi_nor_set_flash_node(nor, flash_node);
/* fill the hooks to spi nor */
- nor->read = mt8173_nor_read;
- nor->read_reg = mt8173_nor_read_reg;
- nor->write = mt8173_nor_write;
- nor->write_reg = mt8173_nor_write_reg;
+ nor->read = mtk_nor_read;
+ nor->read_reg = mtk_nor_read_reg;
+ nor->write = mtk_nor_write;
+ nor->write_reg = mtk_nor_write_reg;
nor->mtd.name = "mtk_nor";
/* initialized with NULL */
ret = spi_nor_scan(nor, NULL, &hwcaps);
@@ -465,34 +465,34 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
struct device_node *flash_np;
struct resource *res;
int ret;
- struct mt8173_nor *mt8173_nor;
+ struct mtk_nor *mtk_nor;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "No DT found\n");
return -EINVAL;
}
- mt8173_nor = devm_kzalloc(&pdev->dev, sizeof(*mt8173_nor), GFP_KERNEL);
- if (!mt8173_nor)
+ mtk_nor = devm_kzalloc(&pdev->dev, sizeof(*mtk_nor), GFP_KERNEL);
+ if (!mtk_nor)
return -ENOMEM;
- platform_set_drvdata(pdev, mt8173_nor);
+ platform_set_drvdata(pdev, mtk_nor);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mt8173_nor->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mt8173_nor->base))
- return PTR_ERR(mt8173_nor->base);
+ mtk_nor->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mtk_nor->base))
+ return PTR_ERR(mtk_nor->base);
- mt8173_nor->spi_clk = devm_clk_get(&pdev->dev, "spi");
- if (IS_ERR(mt8173_nor->spi_clk))
- return PTR_ERR(mt8173_nor->spi_clk);
+ mtk_nor->spi_clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(mtk_nor->spi_clk))
+ return PTR_ERR(mtk_nor->spi_clk);
- mt8173_nor->nor_clk = devm_clk_get(&pdev->dev, "sf");
- if (IS_ERR(mt8173_nor->nor_clk))
- return PTR_ERR(mt8173_nor->nor_clk);
+ mtk_nor->nor_clk = devm_clk_get(&pdev->dev, "sf");
+ if (IS_ERR(mtk_nor->nor_clk))
+ return PTR_ERR(mtk_nor->nor_clk);
- mt8173_nor->dev = &pdev->dev;
+ mtk_nor->dev = &pdev->dev;
- ret = mt8173_nor_enable_clk(mt8173_nor);
+ ret = mtk_nor_enable_clk(mtk_nor);
if (ret)
return ret;
@@ -503,20 +503,20 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
ret = -ENODEV;
goto nor_free;
}
- ret = mtk_nor_init(mt8173_nor, flash_np);
+ ret = mtk_nor_init(mtk_nor, flash_np);
nor_free:
if (ret)
- mt8173_nor_disable_clk(mt8173_nor);
+ mtk_nor_disable_clk(mtk_nor);
return ret;
}
static int mtk_nor_drv_remove(struct platform_device *pdev)
{
- struct mt8173_nor *mt8173_nor = platform_get_drvdata(pdev);
+ struct mtk_nor *mtk_nor = platform_get_drvdata(pdev);
- mt8173_nor_disable_clk(mt8173_nor);
+ mtk_nor_disable_clk(mtk_nor);
return 0;
}
@@ -524,18 +524,18 @@ static int mtk_nor_drv_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int mtk_nor_suspend(struct device *dev)
{
- struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+ struct mtk_nor *mtk_nor = dev_get_drvdata(dev);
- mt8173_nor_disable_clk(mt8173_nor);
+ mtk_nor_disable_clk(mtk_nor);
return 0;
}
static int mtk_nor_resume(struct device *dev)
{
- struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+ struct mtk_nor *mtk_nor = dev_get_drvdata(dev);
- return mt8173_nor_enable_clk(mt8173_nor);
+ return mtk_nor_enable_clk(mtk_nor);
}
static const struct dev_pm_ops mtk_nor_dev_pm_ops = {
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index bc266f7..d445a4d 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -330,8 +330,22 @@ static inline int spi_nor_fsr_ready(struct spi_nor *nor)
int fsr = read_fsr(nor);
if (fsr < 0)
return fsr;
- else
- return fsr & FSR_READY;
+
+ if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
+ if (fsr & FSR_E_ERR)
+ dev_err(nor->dev, "Erase operation failed.\n");
+ else
+ dev_err(nor->dev, "Program operation failed.\n");
+
+ if (fsr & FSR_PT_ERR)
+ dev_err(nor->dev,
+ "Attempted to modify a protected sector.\n");
+
+ nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
+ return -EIO;
+ }
+
+ return fsr & FSR_READY;
}
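
The reworked spi_nor_fsr_ready() decodes the erase/program error bits before reporting readiness instead of returning only the ready bit. A standalone sketch of that decode; the bit values follow the Micron-style layout the hunk implies, so treat them as illustrative:

#include <stdio.h>

/* Assumed flag-status-register bits (illustrative values). */
#define FSR_READY	0x80	/* device ready */
#define FSR_E_ERR	0x20	/* erase failure */
#define FSR_P_ERR	0x10	/* program failure */
#define FSR_PT_ERR	0x02	/* protection violation */

static int fsr_ready(int fsr)
{
	if (fsr < 0)
		return fsr;		/* propagate the read error */

	if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
		fprintf(stderr, "%s operation failed%s\n",
			(fsr & FSR_E_ERR) ? "erase" : "program",
			(fsr & FSR_PT_ERR) ? " (protected sector)" : "");
		/* a real driver would clear the sticky bits here */
		return -5;		/* -EIO */
	}

	return fsr & FSR_READY;		/* 0 = busy, nonzero = ready */
}

int main(void)
{
	printf("%d\n", fsr_ready(FSR_READY));
	printf("%d\n", fsr_ready(FSR_P_ERR | FSR_PT_ERR));
	return 0;
}
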
static int spi_nor_ready(struct spi_nor *nor)
@@ -552,6 +566,27 @@ erase_err:
return ret;
}
+/* Write status register and ensure bits in mask match written values */
+static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
+{
+ int ret;
+
+ write_enable(nor);
+ ret = write_sr(nor, status_new);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ ret = read_sr(nor);
+ if (ret < 0)
+ return ret;
+
+ return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
+}
+
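
write_sr_and_check() folds the duplicated write / wait / read-back-verify sequence from stm_lock() and stm_unlock() into one helper. A minimal sketch of the write-then-verify idea against a fake register; hw_sr, write_sr() and read_sr() are stand-ins, not the driver's functions:

#include <stdio.h>

/* Hypothetical device register; stands in for the NOR status register. */
static unsigned char hw_sr;

static int write_sr(unsigned char v) { hw_sr = v; return 0; }
static int read_sr(void)             { return hw_sr; }

/* Write a new status value, then read back and confirm that every bit
 * covered by @mask stuck, mirroring the helper added above. */
static int write_sr_and_check(unsigned char new, unsigned char mask)
{
	int ret = write_sr(new);

	if (ret)
		return ret;

	ret = read_sr();
	if (ret < 0)
		return ret;

	return ((ret & mask) != (new & mask)) ? -5 /* -EIO */ : 0;
}

int main(void)
{
	printf("%d\n", write_sr_and_check(0x3c, 0x7c));
	return 0;
}
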
static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
uint64_t *len)
{
@@ -650,7 +685,6 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
- int ret;
status_old = read_sr(nor);
if (status_old < 0)
@@ -714,11 +748,7 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
if ((status_new & mask) < (status_old & mask))
return -EINVAL;
- write_enable(nor);
- ret = write_sr(nor, status_new);
- if (ret)
- return ret;
- return spi_nor_wait_till_ready(nor);
+ return write_sr_and_check(nor, status_new, mask);
}
/*
@@ -735,7 +765,6 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
- int ret;
status_old = read_sr(nor);
if (status_old < 0)
@@ -802,11 +831,7 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
if ((status_new & mask) > (status_old & mask))
return -EINVAL;
- write_enable(nor);
- ret = write_sr(nor, status_new);
- if (ret)
- return ret;
- return spi_nor_wait_till_ready(nor);
+ return write_sr_and_check(nor, status_new, mask);
}
/*
@@ -1020,7 +1045,13 @@ static const struct flash_info spi_nor_ids[] = {
{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
/* ISSI */
- { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+ { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+ { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ) },
/* Macronix */
{ "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
@@ -1065,7 +1096,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
- /* Spansion -- single (large) sector size only, at least
+ /* Spansion/Cypress -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
@@ -1094,6 +1125,8 @@ static const struct flash_info spi_nor_ids[] = {
{ "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
{ "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) },
{ "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
@@ -2713,6 +2746,16 @@ static void spi_nor_resume(struct mtd_info *mtd)
dev_err(dev, "resume() failed\n");
}
+void spi_nor_restore(struct spi_nor *nor)
+{
+ /* restore the addressing mode */
+ if ((nor->addr_width == 4) &&
+ (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
+ !(nor->info->flags & SPI_NOR_4B_OPCODES))
+ set_4byte(nor, nor->info, 0);
+}
+EXPORT_SYMBOL_GPL(spi_nor_restore);
+
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps)
{
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c
index 5f03b8c..cde19c9 100644
--- a/drivers/mtd/tests/nandbiterrs.c
+++ b/drivers/mtd/tests/nandbiterrs.c
@@ -151,7 +151,7 @@ static int read_page(int log)
memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats));
err = mtd_read(mtd, offset, mtd->writesize, &read, rbuffer);
- if (err == -EUCLEAN)
+ if (!err || err == -EUCLEAN)
err = mtd->ecc_stats.corrected - oldstats.corrected;
if (err < 0 || read != mtd->writesize) {
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 1cb3f77..766b2c3 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -193,6 +193,9 @@ static int verify_eraseblock(int ebnum)
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err || ops.oobretlen != use_len) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
@@ -227,6 +230,9 @@ static int verify_eraseblock(int ebnum)
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err || ops.oobretlen != mtd->oobavail) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
@@ -286,6 +292,9 @@ static int verify_eraseblock_in_one_go(int ebnum)
/* read entire block's OOB at one go */
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err || ops.oobretlen != len) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
@@ -527,6 +536,9 @@ static int __init mtd_oobtest_init(void)
pr_info("attempting to start read past end of OOB\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, addr0, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err) {
pr_info("error occurred as expected\n");
err = 0;
@@ -571,6 +583,9 @@ static int __init mtd_oobtest_init(void)
pr_info("attempting to read past end of device\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err) {
pr_info("error occurred as expected\n");
err = 0;
@@ -615,6 +630,9 @@ static int __init mtd_oobtest_init(void)
pr_info("attempting to read past end of device\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err) {
pr_info("error occurred as expected\n");
err = 0;
@@ -684,6 +702,9 @@ static int __init mtd_oobtest_init(void)
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err)
goto out;
if (memcmpshow(addr, readbuf, writebuf,
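
Every hunk above normalizes the corrected-bitflip soft error before the pass/fail check. A sketch of the same idea with a hypothetical read_oob_raw() backend; the -EUCLEAN value matches Linux, everything else is illustrative:

#include <stdio.h>

#define EUCLEAN 117	/* corrected-bitflip soft error, as on Linux */

/* Hypothetical backend; returns -EUCLEAN when ECC corrected bitflips. */
static int read_oob_raw(int simulate_bitflip)
{
	return simulate_bitflip ? -EUCLEAN : 0;
}

/* Treat corrected bitflips as success, matching the repeated
 * "if (mtd_is_bitflip(err)) err = 0;" pattern added to oobtest. */
static int read_oob(int simulate_bitflip)
{
	int err = read_oob_raw(simulate_bitflip);

	if (err == -EUCLEAN)	/* data was corrected; not a failure */
		err = 0;
	return err;
}

int main(void)
{
	printf("%d %d\n", read_oob(0), read_oob(1));
	return 0;
}
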
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index b210fdb..b1fc28f 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -99,6 +99,8 @@ struct ubiblock {
/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
+static DEFINE_IDR(ubiblock_minor_idr);
+/* Protects ubiblock_devices and ubiblock_minor_idr */
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;
@@ -351,8 +353,6 @@ static const struct blk_mq_ops ubiblock_mq_ops = {
.init_request = ubiblock_init_request,
};
-static DEFINE_IDR(ubiblock_minor_idr);
-
int ubiblock_create(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
@@ -365,14 +365,15 @@ int ubiblock_create(struct ubi_volume_info *vi)
/* Check that the volume isn't already handled */
mutex_lock(&devices_mutex);
if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
- mutex_unlock(&devices_mutex);
- return -EEXIST;
+ ret = -EEXIST;
+ goto out_unlock;
}
- mutex_unlock(&devices_mutex);
dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
+ if (!dev) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
mutex_init(&dev->dev_mutex);
@@ -437,14 +438,13 @@ int ubiblock_create(struct ubi_volume_info *vi)
goto out_free_queue;
}
- mutex_lock(&devices_mutex);
list_add_tail(&dev->list, &ubiblock_devices);
- mutex_unlock(&devices_mutex);
/* Must be the last step: anyone can call file ops from now on */
add_disk(dev->gd);
dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
dev->ubi_num, dev->vol_id, vi->name);
+ mutex_unlock(&devices_mutex);
return 0;
out_free_queue:
@@ -457,6 +457,8 @@ out_put_disk:
put_disk(dev->gd);
out_free_dev:
kfree(dev);
+out_unlock:
+ mutex_unlock(&devices_mutex);
return ret;
}
@@ -478,30 +480,36 @@ static void ubiblock_cleanup(struct ubiblock *dev)
int ubiblock_remove(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
+ int ret;
mutex_lock(&devices_mutex);
dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
if (!dev) {
- mutex_unlock(&devices_mutex);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_unlock;
}
/* Found a device, let's lock it so we can check if it's busy */
mutex_lock(&dev->dev_mutex);
if (dev->refcnt > 0) {
- mutex_unlock(&dev->dev_mutex);
- mutex_unlock(&devices_mutex);
- return -EBUSY;
+ ret = -EBUSY;
+ goto out_unlock_dev;
}
/* Remove from device list */
list_del(&dev->list);
- mutex_unlock(&devices_mutex);
-
ubiblock_cleanup(dev);
mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&devices_mutex);
+
kfree(dev);
return 0;
+
+out_unlock_dev:
+ mutex_unlock(&dev->dev_mutex);
+out_unlock:
+ mutex_unlock(&devices_mutex);
+ return ret;
}
static int ubiblock_resize(struct ubi_volume_info *vi)
@@ -630,6 +638,7 @@ static void ubiblock_remove_all(void)
struct ubiblock *next;
struct ubiblock *dev;
+ mutex_lock(&devices_mutex);
list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
/* The module is being forcefully removed */
WARN_ON(dev->desc);
@@ -638,6 +647,7 @@ static void ubiblock_remove_all(void)
ubiblock_cleanup(dev);
kfree(dev);
}
+ mutex_unlock(&devices_mutex);
}
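
The ubiblock rework widens devices_mutex to cover both the device list and the minor IDR, and converts the early returns into goto-based unlock paths. A user-space sketch of that single-lock, single-exit shape; registry[] stands in for the list plus IDR:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t devices_mutex = PTHREAD_MUTEX_INITIALIZER;
static const char *registry[8];		/* stands in for list + IDR */

/* Create an entry; every exit path funnels through one unlock label. */
static int create_dev(const char *name)
{
	int i, ret = 0;

	pthread_mutex_lock(&devices_mutex);
	for (i = 0; i < 8; i++) {
		if (registry[i] && !strcmp(registry[i], name)) {
			ret = -17;	/* -EEXIST */
			goto out_unlock;
		}
	}
	for (i = 0; i < 8 && registry[i]; i++)
		;
	if (i == 8) {
		ret = -12;		/* -ENOMEM */
		goto out_unlock;
	}
	registry[i] = name;		/* published only under the lock */
out_unlock:
	pthread_mutex_unlock(&devices_mutex);
	return ret;
}

int main(void)
{
	printf("%d %d\n", create_dev("vol0"), create_dev("vol0"));
	return 0;
}
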
int __init ubiblock_init(void)
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 136ce05..e941395 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -535,8 +535,17 @@ static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
int limit, device_pebs;
uint64_t device_size;
- if (!max_beb_per1024)
- return 0;
+ if (!max_beb_per1024) {
+ /*
+ * Since max_beb_per1024 has not been set by the user in either
+ * the cmdline or Kconfig, use mtd_max_bad_blocks to set the
+ * limit if it is supported by the device.
+ */
+ limit = mtd_max_bad_blocks(ubi->mtd, 0, ubi->mtd->size);
+ if (limit < 0)
+ return 0;
+ return limit;
+ }
/*
* Here we are using size of the entire flash chip and
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 388e46b..250e30f 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -384,7 +384,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
}
/**
- * leb_write_lock - lock logical eraseblock for writing.
+ * leb_write_trylock - try to lock logical eraseblock for writing.
* @ubi: UBI device description object
* @vol_id: volume ID
* @lnum: logical eraseblock number
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 4f0bd6b..590d967 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -66,7 +66,7 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
}
}
-static int anchor_pebs_avalible(struct rb_root *root)
+static int anchor_pebs_available(struct rb_root *root)
{
struct rb_node *p;
struct ubi_wl_entry *e;
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 5a832bc..9170596 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -214,9 +214,8 @@ static void assign_aeb_to_av(struct ubi_attach_info *ai,
struct ubi_ainf_volume *av)
{
struct ubi_ainf_peb *tmp_aeb;
- struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+ struct rb_node **p = &av->root.rb_node, *parent = NULL;
- p = &av->root.rb_node;
while (*p) {
parent = *p;
@@ -1063,7 +1062,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e) {
while (i--)
- kfree(fm->e[i]);
+ kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
ret = -ENOMEM;
goto free_hdr;
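
The unwind loop now releases the entries to the slab cache they came from; mixing kfree() with kmem_cache_alloc() corrupts the allocator. A toy illustration of the pairing rule, with alloc_from_cache()/free_to_cache() as stand-ins for the kmem_cache API:

#include <stdlib.h>

/* Toy object cache: objects must return to the cache they came from. */
struct entry { int pnum; struct entry *next; };
static struct entry *freelist;

static struct entry *alloc_from_cache(void)
{
	struct entry *e = freelist;

	if (e)
		freelist = e->next;
	else
		e = malloc(sizeof(*e));
	return e;
}

static void free_to_cache(struct entry *e)
{
	if (!e)
		return;
	e->next = freelist;	/* must NOT go back via plain free() */
	freelist = e;
}

int main(void)
{
	struct entry *e[4];
	int i;

	for (i = 0; i < 4; i++)
		e[i] = alloc_from_cache();
	/* unwind on failure: release to the cache we allocated from */
	while (i--)
		free_to_cache(e[i]);
	return 0;
}
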
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 85237cf..3fd8d7f 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -270,6 +270,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->last_eb_bytes = vol->usable_leb_size;
}
+ /* Make volume "available" before it becomes accessible via sysfs */
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
+ ubi->vol_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
@@ -298,11 +304,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (err)
goto out_sysfs;
- spin_lock(&ubi->volumes_lock);
- ubi->volumes[vol_id] = vol;
- ubi->vol_count += 1;
- spin_unlock(&ubi->volumes_lock);
-
ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
self_check_volumes(ubi);
return err;
@@ -315,6 +316,10 @@ out_sysfs:
*/
cdev_device_del(&vol->cdev, &vol->dev);
out_mapping:
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = NULL;
+ ubi->vol_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
ubi_eba_destroy_table(eba_tbl);
out_acc:
spin_lock(&ubi->volumes_lock);
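
The reordering above publishes the volume in ubi->volumes before cdev/sysfs registration makes it user-visible, and rolls the publication back under the same lock if registration fails. A sketch of that publish-then-roll-back shape with pthreads; register_cdev() is a hypothetical stand-in:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *volumes[8];
static int vol_count;

static int register_cdev(int fail) { return fail ? -5 : 0; }

/* Publish the object before the step that makes it externally visible,
 * and undo the publication under the same lock if that step fails. */
static int create_volume(int id, void *vol, int fail_register)
{
	int err;

	pthread_mutex_lock(&lock);
	volumes[id] = vol;
	vol_count++;
	pthread_mutex_unlock(&lock);

	err = register_cdev(fail_register);
	if (err) {
		pthread_mutex_lock(&lock);
		volumes[id] = NULL;	/* roll back the publication */
		vol_count--;
		pthread_mutex_unlock(&lock);
	}
	return err;
}

int main(void)
{
	int v = 1;
	int err = create_volume(0, &v, 1);

	printf("%d count=%d\n", err, vol_count);
	return 0;
}
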
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index b5b8cd6..2052a64 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -692,7 +692,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
#ifdef CONFIG_MTD_UBI_FASTMAP
/* Check whether we need to produce an anchor PEB */
if (!anchor)
- anchor = !anchor_pebs_avalible(&ubi->free);
+ anchor = !anchor_pebs_available(&ubi->free);
if (anchor) {
e1 = find_anchor_wl_entry(&ubi->used);
@@ -1529,6 +1529,46 @@ static void shutdown_work(struct ubi_device *ubi)
}
/**
+ * erase_aeb - erase a PEB given in UBI attach info PEB
+ * @ubi: UBI device description object
+ * @aeb: UBI attach info PEB
+ * @sync: If true, erase synchronously. Otherwise schedule for erasure
+ */
+static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
+{
+ struct ubi_wl_entry *e;
+ int err;
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi->lookuptbl[e->pnum] = e;
+
+ if (sync) {
+ err = sync_erase(ubi, e, false);
+ if (err)
+ goto out_free;
+
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ } else {
+ err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
+ if (err)
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ wl_entry_destroy(ubi, e);
+
+ return err;
+}
+
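
erase_aeb() replaces three near-identical allocate/initialize/erase sequences with one helper whose sync flag selects synchronous or deferred erasure. A compact sketch of the same factoring; sync_erase()/schedule_erase() here are toy stand-ins:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct wl_entry { int pnum; int ec; };

static int sync_erase(struct wl_entry *e)     { printf("erase %d now\n", e->pnum); return 0; }
static int schedule_erase(struct wl_entry *e) { printf("queue %d\n", e->pnum); return 0; }

/* Factored helper in the shape of erase_aeb(): allocate, register, then
 * either erase synchronously or defer, unwinding the allocation on error.
 * On success, ownership of @e passes to the erase machinery. */
static int erase_one(int pnum, int ec, bool sync)
{
	struct wl_entry *e = malloc(sizeof(*e));
	int err;

	if (!e)
		return -12;			/* -ENOMEM */
	e->pnum = pnum;
	e->ec = ec;

	err = sync ? sync_erase(e) : schedule_erase(e);
	if (err)
		goto out_free;
	return 0;

out_free:
	free(e);
	return err;
}

int main(void)
{
	erase_one(1, 1, false);	/* normal path: schedule for later */
	return erase_one(2, 1, true);	/* stale fastmap anchor: erase now */
}
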
+/**
* ubi_wl_init - initialize the WL sub-system using attaching information.
* @ubi: UBI device description object
* @ai: attaching information
@@ -1566,17 +1606,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
cond_resched();
- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
- goto out_free;
-
- e->pnum = aeb->pnum;
- e->ec = aeb->ec;
- ubi->lookuptbl[e->pnum] = e;
- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
- wl_entry_destroy(ubi, e);
+ err = erase_aeb(ubi, aeb, false);
+ if (err)
goto out_free;
- }
found_pebs++;
}
@@ -1585,8 +1617,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
+ if (!e) {
+ err = -ENOMEM;
goto out_free;
+ }
e->pnum = aeb->pnum;
e->ec = aeb->ec;
@@ -1605,8 +1639,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
+ if (!e) {
+ err = -ENOMEM;
goto out_free;
+ }
e->pnum = aeb->pnum;
e->ec = aeb->ec;
@@ -1635,6 +1671,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
ubi_assert(!ubi->lookuptbl[e->pnum]);
ubi->lookuptbl[e->pnum] = e;
} else {
+ bool sync = false;
+
/*
* Usually old Fastmap PEBs are scheduled for erasure
* and we don't have to care about them but if we face
@@ -1644,18 +1682,21 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (ubi->lookuptbl[aeb->pnum])
continue;
- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
- goto out_free;
+ /*
+ * The fastmap update code might not find a free PEB for
+ * writing the fastmap anchor to and then reuses the
+ * current fastmap anchor PEB. When this PEB gets erased
+	 * and a power cut happens before it is written again, we
+ * must make sure that the fastmap attach code doesn't
+ * find any outdated fastmap anchors, hence we erase the
+ * outdated fastmap anchor PEBs synchronously here.
+ */
+ if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
+ sync = true;
- e->pnum = aeb->pnum;
- e->ec = aeb->ec;
- ubi_assert(!ubi->lookuptbl[e->pnum]);
- ubi->lookuptbl[e->pnum] = e;
- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
- wl_entry_destroy(ubi, e);
+ err = erase_aeb(ubi, aeb, sync);
+ if (err)
goto out_free;
- }
}
found_pebs++;
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
index 2aaa3f7..a9e2d66 100644
--- a/drivers/mtd/ubi/wl.h
+++ b/drivers/mtd/ubi/wl.h
@@ -2,7 +2,7 @@
#ifndef UBI_WL_H
#define UBI_WL_H
#ifdef CONFIG_MTD_UBI_FASTMAP
-static int anchor_pebs_avalible(struct rb_root *root);
+static int anchor_pebs_available(struct rb_root *root);
static void update_fastmap_work_fn(struct work_struct *wrk);
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
diff --git a/drivers/mux/core.c b/drivers/mux/core.c
index 2260063..6e5cf9d 100644
--- a/drivers/mux/core.c
+++ b/drivers/mux/core.c
@@ -413,6 +413,7 @@ static int of_dev_node_match(struct device *dev, const void *data)
return dev->of_node == data;
}
+/* Note this function returns a reference to the mux_chip dev. */
static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np)
{
struct device *dev;
@@ -466,6 +467,7 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
(!args.args_count && (mux_chip->controllers > 1))) {
dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n",
np, args.np);
+ put_device(&mux_chip->dev);
return ERR_PTR(-EINVAL);
}
@@ -476,10 +478,10 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name)
if (controller >= mux_chip->controllers) {
dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n",
np, controller, args.np);
+ put_device(&mux_chip->dev);
return ERR_PTR(-EINVAL);
}
- get_device(&mux_chip->dev);
return &mux_chip->mux[controller];
}
EXPORT_SYMBOL_GPL(mux_control_get);
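
The mux fix makes of_find_mux_chip_by_node() the single point that takes the device reference, so every error exit added above must drop it. A toy refcount sketch of the acquire-once, release-on-every-failing-exit invariant:

#include <stdio.h>

struct chip { int refcnt; int ncontrollers; };

/* Lookup returns with a reference held (stands in for get_device()). */
static struct chip *find_chip(struct chip *c) { c->refcnt++; return c; }
static void put_chip(struct chip *c)          { c->refcnt--; }

/* Every failing exit must drop the reference the lookup took,
 * mirroring the put_device() calls added above. */
static int get_control(struct chip *c, int index)
{
	struct chip *found = find_chip(c);

	if (index >= found->ncontrollers) {
		put_chip(found);	/* error path: release the reference */
		return -22;		/* -EINVAL */
	}
	return 0;			/* success: caller owns the reference */
}

int main(void)
{
	struct chip c = { 0, 2 };
	int ret = get_control(&c, 5);

	printf("ret=%d refcnt=%d\n", ret, c.refcnt);
	return 0;
}
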
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 0626dcf..760d2c0 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -526,7 +526,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
data = be32_to_cpup((__be32 *)&cf->data[0]);
flexcan_write(data, &priv->tx_mb->data[0]);
}
- if (cf->can_dlc > 3) {
+ if (cf->can_dlc > 4) {
data = be32_to_cpup((__be32 *)&cf->data[4]);
flexcan_write(data, &priv->tx_mb->data[1]);
}
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index b003582..12ff002 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -395,6 +395,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
if (dev->can.state == CAN_STATE_ERROR_WARNING ||
dev->can.state == CAN_STATE_ERROR_PASSIVE) {
+ cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (txerr > rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
}
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 68ac3e8..8bf80ad 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -449,7 +449,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
rc);
- return rc;
+ return (rc > 0) ? 0 : rc;
}
static void gs_usb_xmit_callback(struct urb *urb)
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 7ccdc3e..53d6bb0 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
int err = 0;
u8 *packet_ptr;
- int i, n = 1, packet_len;
+ int packet_len;
ptrdiff_t cmd_len;
/* usb device unregistered? */
@@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
}
packet_ptr = cmd_head;
+ packet_len = cmd_len;
 /* firmware is not able to re-assemble a 512 byte buffer in full-speed mode */
- if ((dev->udev->speed != USB_SPEED_HIGH) &&
- (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) {
- packet_len = PCAN_UFD_LOSPD_PKT_SIZE;
- n += cmd_len / packet_len;
- } else {
- packet_len = cmd_len;
- }
+ if (unlikely(dev->udev->speed != USB_SPEED_HIGH))
+ packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE);
- for (i = 0; i < n; i++) {
+ do {
err = usb_bulk_msg(dev->udev,
usb_sndbulkpipe(dev->udev,
PCAN_USBPRO_EP_CMDOUT),
@@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
}
packet_ptr += packet_len;
- }
+ cmd_len -= packet_len;
+
+ if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE)
+ packet_len = cmd_len;
+
+ } while (packet_len > 0);
return err;
}
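
The command sender now walks the buffer with a do/while, shrinking the final packet to whatever remains instead of precomputing a packet count. A sketch of the same chunking loop over a plain buffer; PKT_SIZE is an assumed stand-in for PCAN_UFD_LOSPD_PKT_SIZE:

#include <stdio.h>
#include <string.h>

#define PKT_SIZE 64	/* stand-in for PCAN_UFD_LOSPD_PKT_SIZE */

/* Send @len bytes in PKT_SIZE chunks, shrinking the last packet to the
 * remainder; same control flow as the reworked command sender. */
static int send_chunked(const unsigned char *buf, int len)
{
	int packet_len = len < PKT_SIZE ? len : PKT_SIZE;

	do {
		printf("send %d bytes\n", packet_len);	/* usb_bulk_msg() here */
		buf += packet_len;
		len -= packet_len;

		if (len < PKT_SIZE)
			packet_len = len;
	} while (packet_len > 0);

	return 0;
}

int main(void)
{
	unsigned char cmd[150];

	memset(cmd, 0, sizeof(cmd));
	return send_chunked(cmd, sizeof(cmd));	/* 64 + 64 + 22 */
}
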
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 8404e88..b4c4a2c 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -194,7 +194,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
tbp = peer_tb;
}
- if (tbp[IFLA_IFNAME]) {
+ if (ifmp && tbp[IFLA_IFNAME]) {
nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
name_assign_type = NET_NAME_USER;
} else {
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index f5a8dd9..4498ab8 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1500,10 +1500,13 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,
{
struct b53_device *dev = ds->priv;
- /* Older models support a different tag format that we do not
- * support in net/dsa/tag_brcm.c yet.
+ /* Older models (5325, 5365) support a different tag format that we do
+ * not support in net/dsa/tag_brcm.c yet. 539x and 531x5 require managed
+	 * mode to be turned on, which means we need to specifically manage ARL
+ * misses on multicast addresses (TBD).
*/
- if (is5325(dev) || is5365(dev) || !b53_can_enable_brcm_tags(ds, port))
+ if (is5325(dev) || is5365(dev) || is539x(dev) || is531x5(dev) ||
+ !b53_can_enable_brcm_tags(ds, port))
return DSA_TAG_PROTO_NONE;
/* Broadcom BCM58xx chips have a flow accelerator on Port 8
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index a7801f6..6315774 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -338,6 +338,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX;
break;
case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_XAUI:
cmode = MV88E6XXX_PORT_STS_CMODE_XAUI;
break;
case PHY_INTERFACE_MODE_RXAUI:
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index f4e13a7..36c8950 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -602,7 +602,7 @@ struct vortex_private {
struct sk_buff* rx_skbuff[RX_RING_SIZE];
struct sk_buff* tx_skbuff[TX_RING_SIZE];
unsigned int cur_rx, cur_tx; /* The next free ring entry */
- unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ unsigned int dirty_tx; /* The ring entries to be free()ed. */
struct vortex_extra_stats xstats; /* NIC-specific extra stats */
struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */
@@ -618,7 +618,6 @@ struct vortex_private {
/* The remainder are related to chip state, mostly media selection. */
struct timer_list timer; /* Media selection timer. */
- struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */
int options; /* User-settable misc. driver options. */
unsigned int media_override:4, /* Passed-in media type. */
default_media:4, /* Read from the EEPROM/Wn3_Config. */
@@ -760,7 +759,6 @@ static void mdio_sync(struct vortex_private *vp, int bits);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
static void vortex_timer(struct timer_list *t);
-static void rx_oom_timer(struct timer_list *t);
static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
@@ -1601,7 +1599,6 @@ vortex_up(struct net_device *dev)
timer_setup(&vp->timer, vortex_timer, 0);
mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));
- timer_setup(&vp->rx_oom_timer, rx_oom_timer, 0);
if (vortex_debug > 1)
pr_debug("%s: Initial media type %s.\n",
@@ -1676,7 +1673,7 @@ vortex_up(struct net_device *dev)
window_write16(vp, 0x0040, 4, Wn4_NetDiag);
if (vp->full_bus_master_rx) { /* Boomerang bus master. */
- vp->cur_rx = vp->dirty_rx = 0;
+ vp->cur_rx = 0;
/* Initialize the RxEarly register as recommended. */
iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
iowrite32(0x0020, ioaddr + PktStatus);
@@ -1729,6 +1726,7 @@ vortex_open(struct net_device *dev)
struct vortex_private *vp = netdev_priv(dev);
int i;
int retval;
+ dma_addr_t dma;
/* Use the now-standard shared IRQ implementation. */
if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
@@ -1753,7 +1751,11 @@ vortex_open(struct net_device *dev)
break; /* Bad news! */
skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
- vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+ dma = pci_map_single(VORTEX_PCI(vp), skb->data,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
+ break;
+ vp->rx_ring[i].addr = cpu_to_le32(dma);
}
if (i != RX_RING_SIZE) {
pr_emerg("%s: no memory for rx ring\n", dev->name);
@@ -2067,6 +2069,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
int len = (skb->len + 3) & ~3;
vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
spin_lock_irq(&vp->window_lock);
window_set(vp, 7);
iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
@@ -2593,7 +2601,7 @@ boomerang_rx(struct net_device *dev)
int entry = vp->cur_rx % RX_RING_SIZE;
void __iomem *ioaddr = vp->ioaddr;
int rx_status;
- int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
+ int rx_work_limit = RX_RING_SIZE;
if (vortex_debug > 5)
pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
@@ -2614,7 +2622,8 @@ boomerang_rx(struct net_device *dev)
} else {
/* The packet length: up to 4.5K!. */
int pkt_len = rx_status & 0x1fff;
- struct sk_buff *skb;
+ struct sk_buff *skb, *newskb;
+ dma_addr_t newdma;
dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
if (vortex_debug > 4)
@@ -2633,9 +2642,27 @@ boomerang_rx(struct net_device *dev)
pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_copy++;
} else {
+ /* Pre-allocate the replacement skb. If it or its
+			 * mapping fails, then recycle the buffer that's already
+			 * in place.
+ */
+ newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
+ if (!newskb) {
+ dev->stats.rx_dropped++;
+ goto clear_complete;
+ }
+ newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
+ PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
+ dev->stats.rx_dropped++;
+ consume_skb(newskb);
+ goto clear_complete;
+ }
+
/* Pass up the skbuff already on the Rx ring. */
skb = vp->rx_skbuff[entry];
- vp->rx_skbuff[entry] = NULL;
+ vp->rx_skbuff[entry] = newskb;
+ vp->rx_ring[entry].addr = cpu_to_le32(newdma);
skb_put(skb, pkt_len);
pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_nocopy++;
@@ -2653,55 +2680,15 @@ boomerang_rx(struct net_device *dev)
netif_rx(skb);
dev->stats.rx_packets++;
}
- entry = (++vp->cur_rx) % RX_RING_SIZE;
- }
- /* Refill the Rx ring buffers. */
- for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
- struct sk_buff *skb;
- entry = vp->dirty_rx % RX_RING_SIZE;
- if (vp->rx_skbuff[entry] == NULL) {
- skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
- if (skb == NULL) {
- static unsigned long last_jif;
- if (time_after(jiffies, last_jif + 10 * HZ)) {
- pr_warn("%s: memory shortage\n",
- dev->name);
- last_jif = jiffies;
- }
- if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
- mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
- break; /* Bad news! */
- }
- vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
- vp->rx_skbuff[entry] = skb;
- }
+clear_complete:
vp->rx_ring[entry].status = 0; /* Clear complete bit. */
iowrite16(UpUnstall, ioaddr + EL3_CMD);
+ entry = (++vp->cur_rx) % RX_RING_SIZE;
}
return 0;
}
-/*
- * If we've hit a total OOM refilling the Rx ring we poll once a second
- * for some memory. Otherwise there is no way to restart the rx process.
- */
-static void
-rx_oom_timer(struct timer_list *t)
-{
- struct vortex_private *vp = from_timer(vp, t, rx_oom_timer);
- struct net_device *dev = vp->mii.dev;
-
- spin_lock_irq(&vp->lock);
- if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
- boomerang_rx(dev);
- if (vortex_debug > 1) {
- pr_debug("%s: rx_oom_timer %s\n", dev->name,
- ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
- }
- spin_unlock_irq(&vp->lock);
-}
-
static void
vortex_down(struct net_device *dev, int final_down)
{
@@ -2711,7 +2698,6 @@ vortex_down(struct net_device *dev, int final_down)
netdev_reset_queue(dev);
netif_stop_queue(dev);
- del_timer_sync(&vp->rx_oom_timer);
del_timer_sync(&vp->timer);
/* Turn off statistics ASAP. We update dev->stats below. */
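
boomerang_rx() now allocates and maps the replacement buffer before handing the filled one to the stack, and recycles the old buffer when allocation fails; that is what lets the refill loop and the OOM timer go away. A sketch of the recycle-on-failure receive loop, with plain malloc() buffers standing in for skbs and DMA mappings:

#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 4

static void *rx_ring[RING_SIZE];	/* buffer currently owned by "hw" */

/* Pass the filled buffer up only if a replacement can be allocated first;
 * otherwise drop the frame and let the hardware reuse the old buffer. */
static void rx_one(int entry, void (*deliver)(void *))
{
	void *newbuf = malloc(1536);

	if (!newbuf) {
		/* recycle: rx_ring[entry] stays in place, frame is dropped */
		return;
	}
	deliver(rx_ring[entry]);	/* hand the full buffer to the stack */
	rx_ring[entry] = newbuf;	/* ring slot is always valid */
}

static void consume(void *buf) { free(buf); }

int main(void)
{
	int i;

	for (i = 0; i < RING_SIZE; i++)
		rx_ring[i] = malloc(1536);
	for (i = 0; i < RING_SIZE; i++)
		rx_one(i, consume);
	for (i = 0; i < RING_SIZE; i++)
		free(rx_ring[i]);
	return 0;
}
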
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 9497f18..2f91ce8 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -123,7 +123,8 @@ enum mac8390_access {
};
extern int mac8390_memtest(struct net_device *dev);
-static int mac8390_initdev(struct net_device *dev, struct nubus_dev *ndev,
+static int mac8390_initdev(struct net_device *dev,
+ struct nubus_rsrc *ndev,
enum mac8390_type type);
static int mac8390_open(struct net_device *dev);
@@ -169,11 +170,11 @@ static void word_memcpy_tocard(unsigned long tp, const void *fp, int count);
static void word_memcpy_fromcard(void *tp, unsigned long fp, int count);
static u32 mac8390_msg_enable;
-static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
+static enum mac8390_type __init mac8390_ident(struct nubus_rsrc *fres)
{
- switch (dev->dr_sw) {
+ switch (fres->dr_sw) {
case NUBUS_DRSW_3COM:
- switch (dev->dr_hw) {
+ switch (fres->dr_hw) {
case NUBUS_DRHW_APPLE_SONIC_NB:
case NUBUS_DRHW_APPLE_SONIC_LC:
case NUBUS_DRHW_SONNET:
@@ -184,7 +185,7 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
break;
case NUBUS_DRSW_APPLE:
- switch (dev->dr_hw) {
+ switch (fres->dr_hw) {
case NUBUS_DRHW_ASANTE_LC:
return MAC8390_NONE;
case NUBUS_DRHW_CABLETRON:
@@ -201,7 +202,7 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
case NUBUS_DRSW_TECHWORKS:
case NUBUS_DRSW_DAYNA2:
case NUBUS_DRSW_DAYNA_LC:
- if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
+ if (fres->dr_hw == NUBUS_DRHW_CABLETRON)
return MAC8390_CABLETRON;
else
return MAC8390_APPLE;
@@ -212,7 +213,7 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
break;
case NUBUS_DRSW_KINETICS:
- switch (dev->dr_hw) {
+ switch (fres->dr_hw) {
case NUBUS_DRHW_INTERLAN:
return MAC8390_INTERLAN;
default:
@@ -225,8 +226,8 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
* These correspond to Dayna Sonic cards
* which use the macsonic driver
*/
- if (dev->dr_hw == NUBUS_DRHW_SMC9194 ||
- dev->dr_hw == NUBUS_DRHW_INTERLAN)
+ if (fres->dr_hw == NUBUS_DRHW_SMC9194 ||
+ fres->dr_hw == NUBUS_DRHW_INTERLAN)
return MAC8390_NONE;
else
return MAC8390_DAYNA;
@@ -289,7 +290,8 @@ static int __init mac8390_memsize(unsigned long membase)
return i * 0x1000;
}
-static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev,
+static bool __init mac8390_init(struct net_device *dev,
+ struct nubus_rsrc *ndev,
enum mac8390_type cardtype)
{
struct nubus_dir dir;
@@ -394,7 +396,7 @@ static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev,
struct net_device * __init mac8390_probe(int unit)
{
struct net_device *dev;
- struct nubus_dev *ndev = NULL;
+ struct nubus_rsrc *ndev = NULL;
int err = -ENODEV;
struct ei_device *ei_local;
@@ -414,8 +416,11 @@ struct net_device * __init mac8390_probe(int unit)
if (unit >= 0)
sprintf(dev->name, "eth%d", unit);
- while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET,
- ndev))) {
+ for_each_func_rsrc(ndev) {
+ if (ndev->category != NUBUS_CAT_NETWORK ||
+ ndev->type != NUBUS_TYPE_ETHERNET)
+ continue;
+
/* Have we seen it already? */
if (slots & (1 << ndev->board->slot))
continue;
@@ -489,7 +494,7 @@ static const struct net_device_ops mac8390_netdev_ops = {
};
static int __init mac8390_initdev(struct net_device *dev,
- struct nubus_dev *ndev,
+ struct nubus_rsrc *ndev,
enum mac8390_type type)
{
static u32 fwrd4_offsets[16] = {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 97c5a89..fbe21a8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -75,6 +75,9 @@ static struct workqueue_struct *ena_wq;
MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
static int ena_rss_init_default(struct ena_adapter *adapter);
+static void check_for_admin_com_state(struct ena_adapter *adapter);
+static void ena_destroy_device(struct ena_adapter *adapter);
+static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev)
{
@@ -1565,7 +1568,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
static int ena_up_complete(struct ena_adapter *adapter)
{
- int rc, i;
+ int rc;
rc = ena_rss_configure(adapter);
if (rc)
@@ -1584,17 +1587,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
ena_napi_enable_all(adapter);
- /* Enable completion queues interrupt */
- for (i = 0; i < adapter->num_queues; i++)
- ena_unmask_interrupt(&adapter->tx_ring[i],
- &adapter->rx_ring[i]);
-
- /* schedule napi in case we had pending packets
- * from the last time we disable napi
- */
- for (i = 0; i < adapter->num_queues; i++)
- napi_schedule(&adapter->ena_napi[i].napi);
-
return 0;
}
@@ -1731,7 +1723,7 @@ create_err:
static int ena_up(struct ena_adapter *adapter)
{
- int rc;
+ int rc, i;
netdev_dbg(adapter->netdev, "%s\n", __func__);
@@ -1774,6 +1766,17 @@ static int ena_up(struct ena_adapter *adapter)
set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+	/* Enable completion queue interrupts */
+ for (i = 0; i < adapter->num_queues; i++)
+ ena_unmask_interrupt(&adapter->tx_ring[i],
+ &adapter->rx_ring[i]);
+
+ /* schedule napi in case we had pending packets
+	 * from the last time we disabled napi
+ */
+ for (i = 0; i < adapter->num_queues; i++)
+ napi_schedule(&adapter->ena_napi[i].napi);
+
return rc;
err_up:
@@ -1884,6 +1887,17 @@ static int ena_close(struct net_device *netdev)
if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
ena_down(adapter);
+	/* Check the device status and issue a reset if needed */
+ check_for_admin_com_state(adapter);
+ if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+ netif_err(adapter, ifdown, adapter->netdev,
+ "Destroy failure, restarting device\n");
+ ena_dump_stats_to_dmesg(adapter);
+ /* rtnl lock already obtained in dev_ioctl() layer */
+ ena_destroy_device(adapter);
+ ena_restore_device(adapter);
+ }
+
return 0;
}
@@ -2544,11 +2558,12 @@ static void ena_destroy_device(struct ena_adapter *adapter)
ena_com_set_admin_running_state(ena_dev, false);
- ena_close(netdev);
+ if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ ena_down(adapter);
/* Before releasing the ENA resources, a device reset is required.
* (to prevent the device from accessing them).
- * In case the reset flag is set and the device is up, ena_close
+ * In case the reset flag is set and the device is up, ena_down()
* already perform the reset, so it can be skipped.
*/
if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 57e7968..105fdb9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -50,7 +50,7 @@
#define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U
#define AQ_CFG_PCI_FUNC_PORTS 2U
-#define AQ_CFG_SERVICE_TIMER_INTERVAL (2 * HZ)
+#define AQ_CFG_SERVICE_TIMER_INTERVAL (1 * HZ)
#define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ))
#define AQ_CFG_SKB_FRAGS_MAX 32U
@@ -80,6 +80,7 @@
#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\
__stringify(NIC_MINOR_DRIVER_VERSION)"."\
__stringify(NIC_BUILD_DRIVER_VERSION)"."\
- __stringify(NIC_REVISION_DRIVER_VERSION)
+ __stringify(NIC_REVISION_DRIVER_VERSION) \
+ AQ_CFG_DRV_VERSION_SUFFIX
#endif /* AQ_CFG_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 70efb74..f2d8063 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -66,14 +66,14 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
"OutUCast",
"OutMCast",
"OutBCast",
- "InUCastOctects",
- "OutUCastOctects",
- "InMCastOctects",
- "OutMCastOctects",
- "InBCastOctects",
- "OutBCastOctects",
- "InOctects",
- "OutOctects",
+ "InUCastOctets",
+ "OutUCastOctets",
+ "InMCastOctets",
+ "OutMCastOctets",
+ "InBCastOctets",
+ "OutBCastOctets",
+ "InOctets",
+ "OutOctets",
"InPacketsDma",
"OutPacketsDma",
"InOctetsDma",
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 0207927..b3825de 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -46,6 +46,28 @@ struct aq_hw_link_status_s {
unsigned int mbps;
};
+struct aq_stats_s {
+ u64 uprc;
+ u64 mprc;
+ u64 bprc;
+ u64 erpt;
+ u64 uptc;
+ u64 mptc;
+ u64 bptc;
+ u64 erpr;
+ u64 mbtc;
+ u64 bbtc;
+ u64 mbrc;
+ u64 bbrc;
+ u64 ubrc;
+ u64 ubtc;
+ u64 dpc;
+ u64 dma_pkt_rc;
+ u64 dma_pkt_tc;
+ u64 dma_oct_rc;
+ u64 dma_oct_tc;
+};
+
#define AQ_HW_IRQ_INVALID 0U
#define AQ_HW_IRQ_LEGACY 1U
#define AQ_HW_IRQ_MSI 2U
@@ -85,7 +107,9 @@ struct aq_hw_ops {
void (*destroy)(struct aq_hw_s *self);
int (*get_hw_caps)(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps);
+ struct aq_hw_caps_s *aq_hw_caps,
+ unsigned short device,
+ unsigned short subsystem_device);
int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
unsigned int frags);
@@ -164,8 +188,7 @@ struct aq_hw_ops {
int (*hw_update_stats)(struct aq_hw_s *self);
- int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data,
- unsigned int *p_count);
+ struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self);
int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 78dfb2a..75a894a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -37,6 +37,8 @@ static unsigned int aq_itr_rx;
module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
+static void aq_nic_update_ndev_stats(struct aq_nic_s *self);
+
static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
@@ -166,11 +168,8 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
static void aq_nic_service_timer_cb(struct timer_list *t)
{
struct aq_nic_s *self = from_timer(self, t, service_timer);
- struct net_device *ndev = aq_nic_get_ndev(self);
+ int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;
int err = 0;
- unsigned int i = 0U;
- struct aq_ring_stats_rx_s stats_rx;
- struct aq_ring_stats_tx_s stats_tx;
if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
goto err_exit;
@@ -182,23 +181,14 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
if (self->aq_hw_ops.hw_update_stats)
self->aq_hw_ops.hw_update_stats(self->aq_hw);
- memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
- memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
- for (i = AQ_DIMOF(self->aq_vec); i--;) {
- if (self->aq_vec[i])
- aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx);
- }
+ aq_nic_update_ndev_stats(self);
- ndev->stats.rx_packets = stats_rx.packets;
- ndev->stats.rx_bytes = stats_rx.bytes;
- ndev->stats.rx_errors = stats_rx.errors;
- ndev->stats.tx_packets = stats_tx.packets;
- ndev->stats.tx_bytes = stats_tx.bytes;
- ndev->stats.tx_errors = stats_tx.errors;
+	/* If there is no link, use a faster timer rate to detect link up ASAP */
+ if (!netif_carrier_ok(self->ndev))
+ ctimer = max(ctimer / 2, 1);
err_exit:
- mod_timer(&self->service_timer,
- jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
+ mod_timer(&self->service_timer, jiffies + ctimer);
}
static void aq_nic_polling_timer_cb(struct timer_list *t)
@@ -222,7 +212,7 @@ static struct net_device *aq_nic_ndev_alloc(void)
struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
const struct ethtool_ops *et_ops,
- struct device *dev,
+ struct pci_dev *pdev,
struct aq_pci_func_s *aq_pci_func,
unsigned int port,
const struct aq_hw_ops *aq_hw_ops)
@@ -242,7 +232,7 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
ndev->netdev_ops = ndev_ops;
ndev->ethtool_ops = et_ops;
- SET_NETDEV_DEV(ndev, dev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->if_port = port;
self->ndev = ndev;
@@ -254,7 +244,8 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
&self->aq_hw_ops);
- err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps);
+ err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps,
+ pdev->device, pdev->subsystem_device);
if (err < 0)
goto err_exit;
@@ -749,16 +740,40 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
- struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
unsigned int count = 0U;
- int err = 0;
+ struct aq_vec_s *aq_vec = NULL;
+ struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
- err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count);
- if (err < 0)
+ if (!stats)
goto err_exit;
- data += count;
+ data[i] = stats->uprc + stats->mprc + stats->bprc;
+ data[++i] = stats->uprc;
+ data[++i] = stats->mprc;
+ data[++i] = stats->bprc;
+ data[++i] = stats->erpt;
+ data[++i] = stats->uptc + stats->mptc + stats->bptc;
+ data[++i] = stats->uptc;
+ data[++i] = stats->mptc;
+ data[++i] = stats->bptc;
+ data[++i] = stats->ubrc;
+ data[++i] = stats->ubtc;
+ data[++i] = stats->mbrc;
+ data[++i] = stats->mbtc;
+ data[++i] = stats->bbrc;
+ data[++i] = stats->bbtc;
+ data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
+ data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
+ data[++i] = stats->dma_pkt_rc;
+ data[++i] = stats->dma_pkt_tc;
+ data[++i] = stats->dma_oct_rc;
+ data[++i] = stats->dma_oct_tc;
+ data[++i] = stats->dpc;
+
+ i++;
+
+ data += i;
count = 0U;
for (i = 0U, aq_vec = self->aq_vec[0];
@@ -768,7 +783,20 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
}
err_exit:;
- (void)err;
+}
+
+static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
+{
+ struct net_device *ndev = self->ndev;
+ struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
+
+ ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc;
+ ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc;
+ ndev->stats.rx_errors = stats->erpr;
+ ndev->stats.tx_packets = stats->uptc + stats->mptc + stats->bptc;
+ ndev->stats.tx_bytes = stats->ubtc + stats->mbtc + stats->bbtc;
+ ndev->stats.tx_errors = stats->erpt;
+ ndev->stats.multicast = stats->mprc;
}
void aq_nic_get_link_ksettings(struct aq_nic_s *self,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 4309983..3c9f8db 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -71,7 +71,7 @@ struct aq_nic_cfg_s {
struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
const struct ethtool_ops *et_ops,
- struct device *dev,
+ struct pci_dev *pdev,
struct aq_pci_func_s *aq_pci_func,
unsigned int port,
const struct aq_hw_ops *aq_hw_ops);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index cadaa64..58c29d0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -51,7 +51,8 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
pci_set_drvdata(pdev, self);
self->pdev = pdev;
- err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps);
+ err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps, pdev->device,
+ pdev->subsystem_device);
if (err < 0)
goto err_exit;
@@ -59,7 +60,7 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
for (port = 0; port < self->ports; ++port) {
struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
- &pdev->dev, self,
+ pdev, self,
port, aq_hw_ops);
if (!aq_nic) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 07b3c49..f18dce14 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -18,9 +18,20 @@
#include "hw_atl_a0_internal.h"
static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps)
+ struct aq_hw_caps_s *aq_hw_caps,
+ unsigned short device,
+ unsigned short subsystem_device)
{
memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps));
+
+ if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
+ aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
+
+ if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
+ aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
+ aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_5G;
+ }
+
return 0;
}
@@ -333,6 +344,10 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
+ /* Reset link status and read out initial hardware counters */
+ self->aq_link_status.mbps = 0;
+ hw_atl_utils_update_stats(self);
+
err = aq_hw_err_from_flags(self);
if (err < 0)
goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index ec68c20..e4a22ce 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -16,11 +16,23 @@
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_b0_internal.h"
+#include "hw_atl_llh_internal.h"
static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self,
- struct aq_hw_caps_s *aq_hw_caps)
+ struct aq_hw_caps_s *aq_hw_caps,
+ unsigned short device,
+ unsigned short subsystem_device)
{
memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps));
+
+ if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
+ aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
+
+ if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
+ aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
+ aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_5G;
+ }
+
return 0;
}
@@ -357,6 +369,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
};
int err = 0;
+ u32 val;
self->aq_nic_cfg = aq_nic_cfg;
@@ -374,6 +387,20 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
+ /* Force limit MRRS on RDM/TDM to 2K */
+ val = aq_hw_read_reg(self, pci_reg_control6_adr);
+ aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404);
+
+	 * TX DMA total request limit. B0 hardware cannot handle
+	 * more than (8K-MRRS) bytes of incoming DMA data. The
+	 * value 24 is in units of 256 bytes.
+ */
+ aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24);
+
+ /* Reset link status and read out initial hardware counters */
+ self->aq_link_status.mbps = 0;
+ hw_atl_utils_update_stats(self);
+
err = aq_hw_err_from_flags(self);
if (err < 0)
goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 5527fc0..93450ec 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -2343,6 +2343,9 @@
#define tx_dma_desc_base_addrmsw_adr(descriptor) \
(0x00007c04u + (descriptor) * 0x40)
+/* tx dma total request limit */
+#define tx_dma_total_req_limit_adr 0x00007b20u
+
/* tx interrupt moderation control register definitions
* Preprocessor definitions for TX Interrupt Moderation Control Register
* Base Address: 0x00008980
@@ -2369,6 +2372,9 @@
/* default value of bitfield reg_res_dsbl */
#define pci_reg_res_dsbl_default 0x1
+/* PCI core control register */
+#define pci_reg_control6_adr 0x1014u
+
/* global microprocessor scratch pad definitions */
#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 1fe016f..f2ce12e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -503,73 +503,43 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
struct hw_atl_s *hw_self = PHAL_ATLANTIC;
struct hw_aq_atl_utils_mbox mbox;
- if (!self->aq_link_status.mbps)
- return 0;
-
hw_atl_utils_mpi_read_stats(self, &mbox);
#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
mbox.stats._N_ - hw_self->last_stats._N_)
-
- AQ_SDELTA(uprc);
- AQ_SDELTA(mprc);
- AQ_SDELTA(bprc);
- AQ_SDELTA(erpt);
-
- AQ_SDELTA(uptc);
- AQ_SDELTA(mptc);
- AQ_SDELTA(bptc);
- AQ_SDELTA(erpr);
-
- AQ_SDELTA(ubrc);
- AQ_SDELTA(ubtc);
- AQ_SDELTA(mbrc);
- AQ_SDELTA(mbtc);
- AQ_SDELTA(bbrc);
- AQ_SDELTA(bbtc);
- AQ_SDELTA(dpc);
-
+ if (self->aq_link_status.mbps) {
+ AQ_SDELTA(uprc);
+ AQ_SDELTA(mprc);
+ AQ_SDELTA(bprc);
+ AQ_SDELTA(erpt);
+
+ AQ_SDELTA(uptc);
+ AQ_SDELTA(mptc);
+ AQ_SDELTA(bptc);
+ AQ_SDELTA(erpr);
+
+ AQ_SDELTA(ubrc);
+ AQ_SDELTA(ubtc);
+ AQ_SDELTA(mbrc);
+ AQ_SDELTA(mbtc);
+ AQ_SDELTA(bbrc);
+ AQ_SDELTA(bbtc);
+ AQ_SDELTA(dpc);
+ }
#undef AQ_SDELTA
+ hw_self->curr_stats.dma_pkt_rc = stats_rx_dma_good_pkt_counterlsw_get(self);
+ hw_self->curr_stats.dma_pkt_tc = stats_tx_dma_good_pkt_counterlsw_get(self);
+ hw_self->curr_stats.dma_oct_rc = stats_rx_dma_good_octet_counterlsw_get(self);
+ hw_self->curr_stats.dma_oct_tc = stats_tx_dma_good_octet_counterlsw_get(self);
memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
return 0;
}
-int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
- u64 *data, unsigned int *p_count)
+struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
{
- struct hw_atl_s *hw_self = PHAL_ATLANTIC;
- struct hw_atl_stats_s *stats = &hw_self->curr_stats;
- int i = 0;
-
- data[i] = stats->uprc + stats->mprc + stats->bprc;
- data[++i] = stats->uprc;
- data[++i] = stats->mprc;
- data[++i] = stats->bprc;
- data[++i] = stats->erpt;
- data[++i] = stats->uptc + stats->mptc + stats->bptc;
- data[++i] = stats->uptc;
- data[++i] = stats->mptc;
- data[++i] = stats->bptc;
- data[++i] = stats->ubrc;
- data[++i] = stats->ubtc;
- data[++i] = stats->mbrc;
- data[++i] = stats->mbtc;
- data[++i] = stats->bbrc;
- data[++i] = stats->bbtc;
- data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
- data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
- data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self);
- data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self);
- data[++i] = stats_rx_dma_good_octet_counterlsw_get(self);
- data[++i] = stats_tx_dma_good_octet_counterlsw_get(self);
- data[++i] = stats->dpc;
-
- if (p_count)
- *p_count = ++i;
-
- return 0;
+ return &PHAL_ATLANTIC->curr_stats;
}
static const u32 hw_atl_utils_hw_mac_regs[] = {
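The AQ_SDELTA macro above implements snapshot-delta accumulation: the firmware mailbox reports free-running counters, so each update adds the difference against the last snapshot to a running total that survives link flaps. A self-contained sketch of the same pattern, reduced to a single field (names are ours, with uprc borrowed from the hunk):

#include <stdint.h>
#include <stdio.h>

struct fw_snapshot { uint64_t uprc; };        /* as read from the mailbox */

struct accum {
        struct fw_snapshot last;              /* previous snapshot */
        uint64_t uprc_total;                  /* accumulated delta */
};

/* Fold one new snapshot into the running totals, mirroring AQ_SDELTA. */
static void stats_fold(struct accum *a, const struct fw_snapshot *now)
{
        a->uprc_total += now->uprc - a->last.uprc;
        a->last = *now;
}

int main(void)
{
        struct accum a = { { 100 }, 0 };
        struct fw_snapshot s = { 150 };

        stats_fold(&a, &s);
        printf("%llu\n", (unsigned long long)a.uprc_total);  /* prints 50 */
        return 0;
}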
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index c99cc69..21aeca6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -129,7 +129,7 @@ struct __packed hw_aq_atl_utils_mbox {
struct __packed hw_atl_s {
struct aq_hw_s base;
struct hw_atl_stats_s last_stats;
- struct hw_atl_stats_s curr_stats;
+ struct aq_stats_s curr_stats;
u64 speed;
unsigned int chip_features;
u32 fw_ver_actual;
@@ -207,8 +207,6 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
int hw_atl_utils_update_stats(struct aq_hw_s *self);
-int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
- u64 *data,
- unsigned int *p_count);
+struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
#endif /* HW_ATL_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index 0de858d..9009f26 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -11,8 +11,10 @@
#define VER_H
#define NIC_MAJOR_DRIVER_VERSION 1
-#define NIC_MINOR_DRIVER_VERSION 5
-#define NIC_BUILD_DRIVER_VERSION 345
+#define NIC_MINOR_DRIVER_VERSION 6
+#define NIC_BUILD_DRIVER_VERSION 13
#define NIC_REVISION_DRIVER_VERSION 0
+#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
+
#endif /* VER_H */
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index 3c63b16..d9efbc8 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -159,6 +159,8 @@ struct arc_emac_priv {
unsigned int link;
unsigned int duplex;
unsigned int speed;
+
+ unsigned int rx_missed_errors;
};
/**
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 3241af1..bd277b0 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -26,6 +26,8 @@
#include "emac.h"
+static void arc_emac_restart(struct net_device *ndev);
+
/**
* arc_emac_tx_avail - Return the number of available slots in the tx ring.
* @priv: Pointer to ARC EMAC private data structure.
@@ -210,39 +212,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
continue;
}
- pktlen = info & LEN_MASK;
- stats->rx_packets++;
- stats->rx_bytes += pktlen;
- skb = rx_buff->skb;
- skb_put(skb, pktlen);
- skb->dev = ndev;
- skb->protocol = eth_type_trans(skb, ndev);
-
- dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
- dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
-
- /* Prepare the BD for next cycle */
- rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
- EMAC_BUFFER_SIZE);
- if (unlikely(!rx_buff->skb)) {
+ /* Prepare the BD for the next cycle. Call netif_receive_skb()
+ * only if a new skb was allocated and mapped, to avoid holes
+ * in the RX fifo.
+ */
+ skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
+ if (unlikely(!skb)) {
+ if (net_ratelimit())
+ netdev_err(ndev, "cannot allocate skb\n");
+ /* Return ownership to EMAC */
+ rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
stats->rx_errors++;
- /* Because receive_skb is below, increment rx_dropped */
stats->rx_dropped++;
continue;
}
- /* receive_skb only if new skb was allocated to avoid holes */
- netif_receive_skb(skb);
-
- addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+ addr = dma_map_single(&ndev->dev, (void *)skb->data,
EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(&ndev->dev, addr)) {
if (net_ratelimit())
- netdev_err(ndev, "cannot dma map\n");
- dev_kfree_skb(rx_buff->skb);
+ netdev_err(ndev, "cannot map dma buffer\n");
+ dev_kfree_skb(skb);
+ /* Return ownership to EMAC */
+ rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
stats->rx_errors++;
+ stats->rx_dropped++;
continue;
}
+
+ /* unmap previously mapped skb */
+ dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+ dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
+
+ pktlen = info & LEN_MASK;
+ stats->rx_packets++;
+ stats->rx_bytes += pktlen;
+ skb_put(rx_buff->skb, pktlen);
+ rx_buff->skb->dev = ndev;
+ rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);
+
+ netif_receive_skb(rx_buff->skb);
+
+ rx_buff->skb = skb;
dma_unmap_addr_set(rx_buff, addr, addr);
dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
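The reordering above is the refill-first RX idiom: a replacement buffer is allocated and DMA-mapped before the received one is handed to the stack, and on any failure the descriptor is simply returned to the EMAC, so the ring never develops a hole. In outline (kernel-style sketch, not compilable in isolation; names follow the hunk):

skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
if (!skb)
        goto return_to_emac;            /* count the drop, keep old buffer */

addr = dma_map_single(&ndev->dev, skb->data,
                      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(&ndev->dev, addr)) {
        dev_kfree_skb(skb);
        goto return_to_emac;
}

/* only now is it safe to give the old buffer away */
dma_unmap_single(&ndev->dev, old_addr, old_len, DMA_FROM_DEVICE);
netif_receive_skb(rx_buff->skb);
rx_buff->skb = skb;                     /* slot already refilled, no hole */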
@@ -259,6 +270,53 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
}
/**
+ * arc_emac_rx_miss_handle - handle R_MISS register
+ * @ndev: Pointer to the net_device structure.
+ */
+static void arc_emac_rx_miss_handle(struct net_device *ndev)
+{
+ struct arc_emac_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ unsigned int miss;
+
+ miss = arc_reg_get(priv, R_MISS);
+ if (miss) {
+ stats->rx_errors += miss;
+ stats->rx_missed_errors += miss;
+ priv->rx_missed_errors += miss;
+ }
+}
+
+/**
+ * arc_emac_rx_stall_check - check RX stall
+ * @ndev: Pointer to the net_device structure.
+ * @budget: How many BDs requested to process on 1 call.
+ * @work_done: How many BDs processed
+ *
+ * Under certain conditions the EMAC stops reception of incoming
+ * packets and continuously increments the R_MISS register instead
+ * of saving data into the provided buffer. This function detects
+ * that condition and restarts the EMAC.
+ */
+static void arc_emac_rx_stall_check(struct net_device *ndev,
+ int budget, unsigned int work_done)
+{
+ struct arc_emac_priv *priv = netdev_priv(ndev);
+ struct arc_emac_bd *rxbd;
+
+ if (work_done)
+ priv->rx_missed_errors = 0;
+
+ if (priv->rx_missed_errors && budget) {
+ rxbd = &priv->rxbd[priv->last_rx_bd];
+ if (le32_to_cpu(rxbd->info) & FOR_EMAC) {
+ arc_emac_restart(ndev);
+ priv->rx_missed_errors = 0;
+ }
+ }
+}
+
+/**
* arc_emac_poll - NAPI poll handler.
* @napi: Pointer to napi_struct structure.
* @budget: How many BDs to process on 1 call.
@@ -272,6 +330,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
unsigned int work_done;
arc_emac_tx_clean(ndev);
+ arc_emac_rx_miss_handle(ndev);
work_done = arc_emac_rx(ndev, budget);
if (work_done < budget) {
@@ -279,6 +338,8 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
}
+ arc_emac_rx_stall_check(ndev, budget, work_done);
+
return work_done;
}
@@ -320,6 +381,8 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
if (status & MSER_MASK) {
stats->rx_missed_errors += 0x100;
stats->rx_errors += 0x100;
+ priv->rx_missed_errors += 0x100;
+ napi_schedule(&priv->napi);
}
if (status & RXCR_MASK) {
@@ -732,6 +795,63 @@ static int arc_emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
+/**
+ * arc_emac_restart - Restart EMAC
+ * @ndev: Pointer to net_device structure.
+ *
+ * This function performs a hardware reset of the EMAC in order to
+ * restore network packet reception.
+ */
+static void arc_emac_restart(struct net_device *ndev)
+{
+ struct arc_emac_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ int i;
+
+ if (net_ratelimit())
+ netdev_warn(ndev, "restarting stalled EMAC\n");
+
+ netif_stop_queue(ndev);
+
+ /* Disable interrupts */
+ arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
+
+ /* Disable EMAC */
+ arc_reg_clr(priv, R_CTRL, EN_MASK);
+
+ /* Return the sk_buff to system */
+ arc_free_tx_queue(ndev);
+
+ /* Clean Tx BD's */
+ priv->txbd_curr = 0;
+ priv->txbd_dirty = 0;
+ memset(priv->txbd, 0, TX_RING_SZ);
+
+ for (i = 0; i < RX_BD_NUM; i++) {
+ struct arc_emac_bd *rxbd = &priv->rxbd[i];
+ unsigned int info = le32_to_cpu(rxbd->info);
+
+ if (!(info & FOR_EMAC)) {
+ stats->rx_errors++;
+ stats->rx_dropped++;
+ }
+ /* Return ownership to EMAC */
+ rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
+ }
+ priv->last_rx_bd = 0;
+
+ /* Make sure info is visible to EMAC before enable */
+ wmb();
+
+ /* Enable interrupts */
+ arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
+
+ /* Enable EMAC */
+ arc_reg_or(priv, R_CTRL, EN_MASK);
+
+ netif_start_queue(ndev);
+}
+
static const struct net_device_ops arc_emac_netdev_ops = {
.ndo_open = arc_emac_open,
.ndo_stop = arc_emac_stop,
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index c616387..16f9bee 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -199,9 +199,11 @@ static int emac_rockchip_probe(struct platform_device *pdev)
/* RMII interface needs always a rate of 50MHz */
err = clk_set_rate(priv->refclk, 50000000);
- if (err)
+ if (err) {
dev_err(dev,
"failed to change reference clock rate (%d)\n", err);
+ goto out_regulator_disable;
+ }
if (priv->soc_data->need_div_macclk) {
priv->macclk = devm_clk_get(dev, "macclk");
@@ -230,12 +232,14 @@ static int emac_rockchip_probe(struct platform_device *pdev)
err = arc_emac_probe(ndev, interface);
if (err) {
dev_err(dev, "failed to probe arc emac (%d)\n", err);
- goto out_regulator_disable;
+ goto out_clk_disable_macclk;
}
return 0;
+
out_clk_disable_macclk:
- clk_disable_unprepare(priv->macclk);
+ if (priv->soc_data->need_div_macclk)
+ clk_disable_unprepare(priv->macclk);
out_regulator_disable:
if (priv->regulator)
regulator_disable(priv->regulator);
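The probe fix above restores the goto-unwind invariant: each failure jumps to the label that releases exactly what has already been acquired, and a conditionally acquired resource (macclk, taken only when need_div_macclk) is guarded by the same condition on the unwind path. A self-contained sketch of the idiom, with stub names of our own:

static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }     /* optional resource */
static int acquire_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int demo_probe(int need_b)
{
        int err;

        err = acquire_a();
        if (err)
                return err;

        if (need_b) {
                err = acquire_b();
                if (err)
                        goto out_release_a;
        }

        err = acquire_c();
        if (err)
                goto out_release_b;

        return 0;

out_release_b:
        if (need_b)                     /* mirror the acquire condition */
                release_b();
out_release_a:
        release_a();
        return err;
}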
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4c739d5..8ae269e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3030,7 +3030,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
del_timer_sync(&bp->timer);
- if (IS_PF(bp)) {
+ if (IS_PF(bp) && !BP_NOMCP(bp)) {
/* Set ALWAYS_ALIVE bit in shmem */
bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
bnx2x_drv_pulse(bp);
@@ -3116,7 +3116,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bp->cnic_loaded = false;
/* Clear driver version indication in shmem */
- if (IS_PF(bp))
+ if (IS_PF(bp) && !BP_NOMCP(bp))
bnx2x_update_mng_version(bp);
/* Check if there are pending parity attentions. If there are - set
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 91e2a75..ddd5d3e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9578,6 +9578,15 @@ static int bnx2x_init_shmem(struct bnx2x *bp)
do {
bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+
+ /* If we read all 0xFFs, it means we are in a PCI error state and
+ * should bail out to avoid crashes on the adapter's FW reads.
+ */
+ if (bp->common.shmem_base == 0xFFFFFFFF) {
+ bp->flags |= NO_MCP_FLAG;
+ return -ENODEV;
+ }
+
if (bp->common.shmem_base) {
val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
if (val & SHR_MEM_VALIDITY_MB)
@@ -14320,7 +14329,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
BNX2X_ERR("IO slot reset --> driver unload\n");
/* MCP should have been reset; Need to wait for validity */
- bnx2x_init_shmem(bp);
+ if (bnx2x_init_shmem(bp)) {
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
u32 v;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 5ee1866..c961767 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -70,7 +70,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
return -EINVAL;
}
- if (vf_id >= bp->pf.max_vfs) {
+ if (vf_id >= bp->pf.active_vfs) {
netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 3d201d7..d8fee26 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -421,7 +421,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
}
/* If all IP and L4 fields are wildcarded then this is an L2 flow */
- if (is_wildcard(&l3_mask, sizeof(l3_mask)) &&
+ if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
} else {
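The one-liner above fixes a classic pointer/pointee mix-up: l3_mask is already a pointer, so &l3_mask inspected the pointer variable itself, and sizeof(l3_mask) measured the pointer rather than the mask. A self-contained illustration (struct layout is ours):

#include <stdio.h>

struct l3_key { unsigned int src_ip, dst_ip; };

static int is_wildcard(const void *mask, int len)
{
        const unsigned char *p = mask;
        int i;

        for (i = 0; i < len; i++)
                if (p[i])
                        return 0;
        return 1;
}

int main(void)
{
        struct l3_key zero_mask = { 0, 0 };
        struct l3_key *l3_mask = &zero_mask;

        /* buggy: scans the bytes of the pointer, for the pointer's size */
        printf("buggy: %d\n", is_wildcard(&l3_mask, sizeof(l3_mask)));
        /* fixed: scans the mask itself, for the struct's size */
        printf("fixed: %d\n", is_wildcard(l3_mask, sizeof(*l3_mask)));
        return 0;
}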
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index de51c21..8995cfe 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4,11 +4,13 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2014 Broadcom Corporation.
+ * Copyright (C) 2005-2016 Broadcom Corporation.
+ * Copyright (C) 2016-2017 Broadcom Limited.
*
* Firmware is:
* Derived from proprietary unpublished source code,
- * Copyright (C) 2000-2003 Broadcom Corporation.
+ * Copyright (C) 2000-2016 Broadcom Corporation.
+ * Copyright (C) 2016-2017 Broadcom Ltd.
*
* Permission is hereby granted for the distribution of this firmware
* data in hexadecimal or equivalent format, provided this copyright
@@ -10052,6 +10054,16 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
tw32(GRC_MODE, tp->grc_mode | val);
+ /* On one of the AMD platforms, MRRS is restricted to 4000 because of
+ * a south bridge limitation. As a workaround, the driver sets MRRS
+ * to 2048 instead of the default 4096.
+ */
+ if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+ tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
+ val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
+ tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
+ }
+
/* Setup the timer prescalar register. Clock is always 66Mhz. */
val = tr32(GRC_MISC_CFG);
val &= ~0xff;
@@ -14225,7 +14237,10 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
/* Reset PHY, otherwise the read DMA engine will be in a mode that
* breaks all requests to 256 bytes.
*/
- if (tg3_asic_rev(tp) == ASIC_REV_57766)
+ if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
+ tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
reset_phy = true;
err = tg3_restart_hw(tp, reset_phy);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index c2d02d0..1f0271f 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -5,7 +5,8 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2014 Broadcom Corporation.
+ * Copyright (C) 2007-2016 Broadcom Corporation.
+ * Copyright (C) 2016-2017 Broadcom Limited.
*/
#ifndef _T3_H
@@ -96,6 +97,7 @@
#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106
#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109
#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a
+#define TG3PCI_SUBDEVICE_ID_DELL_5762 0x07f0
#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ
#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c
#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a
@@ -281,6 +283,9 @@
#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */
#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */
/* 0xa8 --> 0xb8 unused */
+#define TG3PCI_DEV_STATUS_CTRL 0x000000b4
+#define MAX_READ_REQ_SIZE_2048 0x00004000
+#define MAX_READ_REQ_MASK 0x00007000
#define TG3PCI_DUAL_MAC_CTRL 0x000000b8
#define DUAL_MAC_CTRL_CH_MASK 0x00000003
#define DUAL_MAC_CTRL_ID 0x00000004
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 6f9fa6e..d8424ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -344,7 +344,6 @@ struct adapter_params {
unsigned int sf_size; /* serial flash size in bytes */
unsigned int sf_nsec; /* # of flash sectors */
- unsigned int sf_fw_start; /* start of FW image in flash */
unsigned int fw_vers; /* firmware version */
unsigned int bs_vers; /* bootstrap version */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index d4a548a..a452d5a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -111,6 +111,9 @@ static void cxgb4_process_flow_match(struct net_device *dev,
ethtype_mask = 0;
}
+ if (ethtype_key == ETH_P_IPV6)
+ fs->type = 1;
+
fs->val.ethtype = ethtype_key;
fs->mask.ethtype = ethtype_mask;
fs->val.proto = key->ip_proto;
@@ -205,8 +208,8 @@ static void cxgb4_process_flow_match(struct net_device *dev,
VLAN_PRIO_SHIFT);
vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
VLAN_PRIO_SHIFT);
- fs->val.ivlan = cpu_to_be16(vlan_tci);
- fs->mask.ivlan = cpu_to_be16(vlan_tci_mask);
+ fs->val.ivlan = vlan_tci;
+ fs->mask.ivlan = vlan_tci_mask;
/* Chelsio adapters use ivlan_vld bit to match vlan packets
* as 802.1Q. Also, when vlan tag is present in packets,
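The ivlan fix above drops a byte swap: the TCI is assembled in host order from vlan_id and vlan_priority, and (as we read the hunk) the ivlan fields are stored in host order too, so the removed cpu_to_be16() corrupted the match value on little-endian machines. A small demonstration of the TCI layout being built:

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_SHIFT 13

int main(void)
{
        uint16_t vlan_id = 100, prio = 3;
        uint16_t tci = vlan_id | (uint16_t)(prio << VLAN_PRIO_SHIFT);

        /* priority in bits 15:13, VID in bits 11:0 -> 0x6064 */
        printf("tci = 0x%04x\n", tci);
        return 0;
}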
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index f63210f..375ef86 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2844,8 +2844,6 @@ enum {
SF_RD_DATA_FAST = 0xb, /* read flash */
SF_RD_ID = 0x9f, /* read ID */
SF_ERASE_SECTOR = 0xd8, /* erase sector */
-
- FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};
/**
@@ -3558,8 +3556,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
const __be32 *p = (const __be32 *)fw_data;
const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
- unsigned int fw_img_start = adap->params.sf_fw_start;
- unsigned int fw_start_sec = fw_img_start / sf_sec_size;
+ unsigned int fw_start_sec = FLASH_FW_START_SEC;
+ unsigned int fw_size = FLASH_FW_MAX_SIZE;
+ unsigned int fw_start = FLASH_FW_START;
if (!size) {
dev_err(adap->pdev_dev, "FW image has no data\n");
@@ -3575,9 +3574,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
"FW image size differs from size in FW header\n");
return -EINVAL;
}
- if (size > FW_MAX_SIZE) {
+ if (size > fw_size) {
dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
- FW_MAX_SIZE);
+ fw_size);
return -EFBIG;
}
if (!t4_fw_matches_chip(adap, hdr))
@@ -3604,11 +3603,11 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
*/
memcpy(first_page, fw_data, SF_PAGE_SIZE);
((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
- ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
+ ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
if (ret)
goto out;
- addr = fw_img_start;
+ addr = fw_start;
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
addr += SF_PAGE_SIZE;
fw_data += SF_PAGE_SIZE;
@@ -3618,7 +3617,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
}
ret = t4_write_flash(adap,
- fw_img_start + offsetof(struct fw_hdr, fw_ver),
+ fw_start + offsetof(struct fw_hdr, fw_ver),
sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
if (ret)
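t4_load_fw() above keeps its crash-safety ordering while switching to the fixed FLASH_FW_* layout: the first page goes out with fw_ver forced to 0xffffffff, so a flash interrupted mid-image can never be mistaken for valid firmware, and the real version word is committed only after the last page. A self-contained sketch of that ordering against an in-memory "flash" (sizes and the helper are ours):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4
static uint8_t flash[12];

/* stand-in for the real page-write primitive */
static void write_flash(unsigned int addr, const void *buf, unsigned int len)
{
        memcpy(flash + addr, buf, len);
}

int main(void)
{
        /* toy image: the version word lives in the first 4 bytes */
        uint8_t img[12] = { 0x12, 0x34, 0x56, 0x78, 1, 2, 3, 4, 5, 6, 7, 8 };
        uint8_t first[PAGE_SZ];
        uint32_t poison = 0xffffffff;
        unsigned int addr;

        /* 1. first page goes out with the version word poisoned */
        memcpy(first, img, PAGE_SZ);
        memcpy(first, &poison, sizeof(poison));
        write_flash(0, first, PAGE_SZ);

        /* 2. remaining pages verbatim */
        for (addr = PAGE_SZ; addr < sizeof(img); addr += PAGE_SZ)
                write_flash(addr, img + addr, PAGE_SZ);

        /* 3. commit: the real version word is written last */
        write_flash(0, img, sizeof(poison));

        printf("fw_ver: %02x%02x%02x%02x\n",
               flash[0], flash[1], flash[2], flash[3]);
        return 0;
}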
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 410a0a9..b3e7faf 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1913,3 +1913,7 @@ static struct platform_driver cs89x0_driver = {
module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
#endif /* CONFIG_CS89x0_PLATFORM */
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crystal Semiconductor (Now Cirrus Logic) CS89[02]0 network driver");
+MODULE_AUTHOR("Russell Nelson <nelson@crynwr.com>");
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index f910f0f..977d4c2 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -187,6 +187,7 @@ struct net_device * __init mac89x0_probe(int unit)
unsigned long ioaddr;
unsigned short sig;
int err = -ENODEV;
+ struct nubus_rsrc *fres;
if (!MACH_IS_MAC)
return ERR_PTR(-ENODEV);
@@ -207,8 +208,9 @@ struct net_device * __init mac89x0_probe(int unit)
/* We might have to parameterize this later */
slot = 0xE;
/* Get out now if there's a real NuBus card in slot E */
- if (nubus_find_slot(slot, NULL) != NULL)
- goto out;
+ for_each_func_rsrc(fres)
+ if (fres->board->slot == slot)
+ goto out;
/* The pseudo-ISA bits always live at offset 0x300 (gee,
wonder why...) */
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c6e859a..e180657 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4634,6 +4634,15 @@ int be_update_queues(struct be_adapter *adapter)
be_schedule_worker(adapter);
+ /*
+ * The IF was destroyed and re-created. We need to clear
+ * all promiscuous flags valid for the destroyed IF.
+ * Without this, promisc mode is not restored during
+ * be_open() because the driver thinks that it is
+ * already enabled in HW.
+ */
+ adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
+
if (netif_running(netdev))
status = be_open(netdev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 6105738..a74300a 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -818,6 +818,12 @@ static void fec_enet_bd_init(struct net_device *dev)
for (i = 0; i < txq->bd.ring_size; i++) {
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = cpu_to_fec16(0);
+ if (bdp->cbd_bufaddr &&
+ !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen),
+ DMA_TO_DEVICE);
if (txq->tx_skbuff[i]) {
dev_kfree_skb_any(txq->tx_skbuff[i]);
txq->tx_skbuff[i] = NULL;
@@ -3463,6 +3469,10 @@ fec_probe(struct platform_device *pdev)
goto failed_regulator;
}
} else {
+ if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto failed_regulator;
+ }
fep->reg_phy = NULL;
}
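The regulator hunk above adds the standard -EPROBE_DEFER distinction: an error pointer may mean the supply is genuinely absent (proceed without it) or that its provider has not probed yet (bail out so the driver core retries this probe later). A kernel-style sketch of the idiom (illustrative, not compilable stand-alone):

reg_phy = devm_regulator_get(&pdev->dev, "phy");
if (IS_ERR(reg_phy)) {
        if (PTR_ERR(reg_phy) == -EPROBE_DEFER)
                return -EPROBE_DEFER;   /* provider not ready: retry probe */
        reg_phy = NULL;                 /* genuinely absent: carry on */
}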
@@ -3546,8 +3556,9 @@ failed_clk_ipg:
failed_clk:
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
-failed_phy:
of_node_put(phy_node);
+failed_phy:
+ dev_id--;
failed_ioremap:
free_netdev(ndev);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 7892f2f0..2c2976a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -613,9 +613,11 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static void fs_timeout(struct net_device *dev)
+static void fs_timeout_work(struct work_struct *work)
{
- struct fs_enet_private *fep = netdev_priv(dev);
+ struct fs_enet_private *fep = container_of(work, struct fs_enet_private,
+ timeout_work);
+ struct net_device *dev = fep->ndev;
unsigned long flags;
int wake = 0;
@@ -627,7 +629,6 @@ static void fs_timeout(struct net_device *dev)
phy_stop(dev->phydev);
(*fep->ops->stop)(dev);
(*fep->ops->restart)(dev);
- phy_start(dev->phydev);
}
phy_start(dev->phydev);
@@ -639,6 +640,13 @@ static void fs_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
+static void fs_timeout(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ schedule_work(&fep->timeout_work);
+}
+
/*-----------------------------------------------------------------------------
* generic link-change handler - should be sufficient for most cases
*-----------------------------------------------------------------------------*/
@@ -759,6 +767,7 @@ static int fs_enet_close(struct net_device *dev)
netif_stop_queue(dev);
netif_carrier_off(dev);
napi_disable(&fep->napi);
+ cancel_work_sync(&fep->timeout_work);
phy_stop(dev->phydev);
spin_lock_irqsave(&fep->lock, flags);
@@ -1019,6 +1028,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
ndev->netdev_ops = &fs_enet_netdev_ops;
ndev->watchdog_timeo = 2 * HZ;
+ INIT_WORK(&fep->timeout_work, fs_timeout_work);
netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight);
ndev->ethtool_ops = &fs_ethtool_ops;
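The fs_enet changes above follow the usual recipe for heavy ndo_tx_timeout handling: the callback runs in atomic context, so it only schedules a work item; the restart runs from the workqueue, where sleeping is allowed; and close flushes the work before tearing anything down. The skeleton in kernel style (illustrative names; a sketch, not the driver's code):

struct demo_priv {
        struct work_struct timeout_work;
};

static void demo_timeout_work(struct work_struct *work)
{
        struct demo_priv *p = container_of(work, struct demo_priv,
                                           timeout_work);

        (void)p;        /* process context: safe to stop, restart, wake queue */
}

/* probe:          INIT_WORK(&p->timeout_work, demo_timeout_work);
 * ndo_tx_timeout: schedule_work(&p->timeout_work);
 * ndo_stop:       cancel_work_sync(&p->timeout_work);
 */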
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 92e06b3..195fae6 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -125,6 +125,7 @@ struct fs_enet_private {
spinlock_t lock; /* during all ops except TX pckt processing */
spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */
struct fs_platform_info *fpi;
+ struct work_struct timeout_work;
const struct fs_ops *ops;
int rx_ring, tx_ring;
dma_addr_t ring_mem_addr;
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 5441142..9f8d4f8 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -319,11 +319,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
now = tmr_cnt_read(etsects);
now += delta;
tmr_cnt_write(etsects, now);
+ set_fipers(etsects);
spin_unlock_irqrestore(&etsects->lock, flags);
- set_fipers(etsects);
-
return 0;
}
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 7feff24..241db31 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -494,6 +494,9 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
case 16384:
ret |= EMAC_MR1_RFS_16K;
break;
+ case 8192:
+ ret |= EMAC4_MR1_RFS_8K;
+ break;
case 4096:
ret |= EMAC_MR1_RFS_4K;
break;
@@ -516,6 +519,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
case 16384:
ret |= EMAC4_MR1_TFS_16K;
break;
+ case 8192:
+ ret |= EMAC4_MR1_TFS_8K;
+ break;
case 4096:
ret |= EMAC4_MR1_TFS_4K;
break;
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
index 5afcc27..c26d263 100644
--- a/drivers/net/ethernet/ibm/emac/emac.h
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -151,9 +151,11 @@ struct emac_regs {
#define EMAC4_MR1_RFS_2K 0x00100000
#define EMAC4_MR1_RFS_4K 0x00180000
+#define EMAC4_MR1_RFS_8K 0x00200000
#define EMAC4_MR1_RFS_16K 0x00280000
#define EMAC4_MR1_TFS_2K 0x00020000
#define EMAC4_MR1_TFS_4K 0x00030000
+#define EMAC4_MR1_TFS_8K 0x00040000
#define EMAC4_MR1_TFS_16K 0x00050000
#define EMAC4_MR1_TR 0x00008000
#define EMAC4_MR1_MWSW_001 0x00001000
@@ -242,7 +244,7 @@ struct emac_regs {
#define EMAC_STACR_PHYE 0x00004000
#define EMAC_STACR_STAC_MASK 0x00003000
#define EMAC_STACR_STAC_READ 0x00001000
-#define EMAC_STACR_STAC_WRITE 0x00002000
+#define EMAC_STACR_STAC_WRITE 0x00000800
#define EMAC_STACR_OPBC_MASK 0x00000C00
#define EMAC_STACR_OPBC_50 0x00000000
#define EMAC_STACR_OPBC_66 0x00000400
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1dc4aef..b65f5f3 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -410,6 +410,10 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
struct ibmvnic_rx_pool *rx_pool;
int rx_scrqs;
int i, j, rc;
+ u64 *size_array;
+
+ size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
for (i = 0; i < rx_scrqs; i++) {
@@ -417,7 +421,17 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
- rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
+ if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
+ free_long_term_buff(adapter, &rx_pool->long_term_buff);
+ rx_pool->buff_size = be64_to_cpu(size_array[i]);
+ alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
+ rx_pool->size *
+ rx_pool->buff_size);
+ } else {
+ rc = reset_long_term_buff(adapter,
+ &rx_pool->long_term_buff);
+ }
+
if (rc)
return rc;
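The rx-pool reset above distinguishes two cases: if the server's negotiated buffer size for pool i (read through a big-endian byte offset into the login response) still matches, the long-term buffer is merely reset; if it changed, the buffer must be freed and reallocated at the new size. In outline (kernel-style, names from the hunk; note the sketch also captures the allocation's return code in rc, which the hunk as shown leaves unchecked):

if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
        /* size renegotiated: the old mapping is useless, rebuild it */
        free_long_term_buff(adapter, &rx_pool->long_term_buff);
        rx_pool->buff_size = be64_to_cpu(size_array[i]);
        rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
                                  rx_pool->size * rx_pool->buff_size);
} else {
        /* same geometry: just reset the existing buffer */
        rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
}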
@@ -439,14 +453,12 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rx_pool *rx_pool;
- int rx_scrqs;
int i, j;
if (!adapter->rx_pool)
return;
- rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- for (i = 0; i < rx_scrqs; i++) {
+ for (i = 0; i < adapter->num_active_rx_pools; i++) {
rx_pool = &adapter->rx_pool[i];
netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
@@ -469,6 +481,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
kfree(adapter->rx_pool);
adapter->rx_pool = NULL;
+ adapter->num_active_rx_pools = 0;
}
static int init_rx_pools(struct net_device *netdev)
@@ -493,6 +506,8 @@ static int init_rx_pools(struct net_device *netdev)
return -1;
}
+ adapter->num_active_rx_pools = 0;
+
for (i = 0; i < rxadd_subcrqs; i++) {
rx_pool = &adapter->rx_pool[i];
@@ -536,6 +551,8 @@ static int init_rx_pools(struct net_device *netdev)
rx_pool->next_free = 0;
}
+ adapter->num_active_rx_pools = rxadd_subcrqs;
+
return 0;
}
@@ -586,13 +603,12 @@ static void release_vpd_data(struct ibmvnic_adapter *adapter)
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_tx_pool *tx_pool;
- int i, tx_scrqs;
+ int i;
if (!adapter->tx_pool)
return;
- tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
- for (i = 0; i < tx_scrqs; i++) {
+ for (i = 0; i < adapter->num_active_tx_pools; i++) {
netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
tx_pool = &adapter->tx_pool[i];
kfree(tx_pool->tx_buff);
@@ -603,6 +619,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
kfree(adapter->tx_pool);
adapter->tx_pool = NULL;
+ adapter->num_active_tx_pools = 0;
}
static int init_tx_pools(struct net_device *netdev)
@@ -619,6 +636,8 @@ static int init_tx_pools(struct net_device *netdev)
if (!adapter->tx_pool)
return -1;
+ adapter->num_active_tx_pools = 0;
+
for (i = 0; i < tx_subcrqs; i++) {
tx_pool = &adapter->tx_pool[i];
@@ -666,6 +685,8 @@ static int init_tx_pools(struct net_device *netdev)
tx_pool->producer_index = 0;
}
+ adapter->num_active_tx_pools = tx_subcrqs;
+
return 0;
}
@@ -756,6 +777,12 @@ static int ibmvnic_login(struct net_device *netdev)
}
} while (adapter->renegotiate);
+ /* handle pending MAC address changes after successful login */
+ if (adapter->mac_change_pending) {
+ __ibmvnic_set_mac(netdev, &adapter->desired.mac);
+ adapter->mac_change_pending = false;
+ }
+
return 0;
}
@@ -854,7 +881,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
if (adapter->vpd->buff)
len = adapter->vpd->len;
- reinit_completion(&adapter->fw_done);
+ init_completion(&adapter->fw_done);
crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
crq.get_vpd_size.cmd = GET_VPD_SIZE;
ibmvnic_send_crq(adapter, &crq);
@@ -916,6 +943,13 @@ static int init_resources(struct ibmvnic_adapter *adapter)
if (!adapter->vpd)
return -ENOMEM;
+ /* Vital Product Data (VPD) */
+ rc = ibmvnic_get_vpd(adapter);
+ if (rc) {
+ netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
+ return rc;
+ }
+
adapter->map_id = 1;
adapter->napi = kcalloc(adapter->req_rx_queues,
sizeof(struct napi_struct), GFP_KERNEL);
@@ -989,15 +1023,10 @@ static int __ibmvnic_open(struct net_device *netdev)
static int ibmvnic_open(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int rc, vpd;
+ int rc;
mutex_lock(&adapter->reset_lock);
- if (adapter->mac_change_pending) {
- __ibmvnic_set_mac(netdev, &adapter->desired.mac);
- adapter->mac_change_pending = false;
- }
-
if (adapter->state != VNIC_CLOSED) {
rc = ibmvnic_login(netdev);
if (rc) {
@@ -1017,11 +1046,6 @@ static int ibmvnic_open(struct net_device *netdev)
rc = __ibmvnic_open(netdev);
netif_carrier_on(netdev);
- /* Vital Product Data (VPD) */
- vpd = ibmvnic_get_vpd(adapter);
- if (vpd)
- netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
-
mutex_unlock(&adapter->reset_lock);
return rc;
@@ -1275,6 +1299,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned char *dst;
u64 *handle_array;
int index = 0;
+ u8 proto = 0;
int ret = 0;
if (adapter->resetting) {
@@ -1363,17 +1388,18 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
}
if (skb->protocol == htons(ETH_P_IP)) {
- if (ip_hdr(skb)->version == 4)
- tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
- else if (ip_hdr(skb)->version == 6)
- tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
-
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
- tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
- else if (ip_hdr(skb)->protocol != IPPROTO_TCP)
- tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
+ tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
+ proto = ip_hdr(skb)->protocol;
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
+ proto = ipv6_hdr(skb)->nexthdr;
}
+ if (proto == IPPROTO_TCP)
+ tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
+ else if (proto == IPPROTO_UDP)
+ tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
+
if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
hdrs += 2;
@@ -1527,7 +1553,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
- if (adapter->state != VNIC_OPEN) {
+ if (adapter->state == VNIC_PROBED) {
memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
adapter->mac_change_pending = true;
return 0;
@@ -1545,6 +1571,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
static int do_reset(struct ibmvnic_adapter *adapter,
struct ibmvnic_rwi *rwi, u32 reset_state)
{
+ u64 old_num_rx_queues, old_num_tx_queues;
struct net_device *netdev = adapter->netdev;
int i, rc;
@@ -1554,6 +1581,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
netif_carrier_off(netdev);
adapter->reset_reason = rwi->reset_reason;
+ old_num_rx_queues = adapter->req_rx_queues;
+ old_num_tx_queues = adapter->req_tx_queues;
+
if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
rc = ibmvnic_reenable_crq_queue(adapter);
if (rc)
@@ -1598,6 +1628,12 @@ static int do_reset(struct ibmvnic_adapter *adapter,
rc = init_resources(adapter);
if (rc)
return rc;
+ } else if (adapter->req_rx_queues != old_num_rx_queues ||
+ adapter->req_tx_queues != old_num_tx_queues) {
+ release_rx_pools(adapter);
+ release_tx_pools(adapter);
+ init_rx_pools(netdev);
+ init_tx_pools(netdev);
} else {
rc = reset_tx_pools(adapter);
if (rc)
@@ -3345,7 +3381,11 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
return;
}
+ adapter->ip_offload_ctrl.len =
+ cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
+ adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
+ adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
@@ -3585,7 +3625,17 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
*req_value,
(long int)be64_to_cpu(crq->request_capability_rsp.
number), name);
- *req_value = be64_to_cpu(crq->request_capability_rsp.number);
+
+ if (be16_to_cpu(crq->request_capability_rsp.capability) ==
+ REQ_MTU) {
+ pr_err("mtu of %llu is not supported. Reverting.\n",
+ *req_value);
+ *req_value = adapter->fallback.mtu;
+ } else {
+ *req_value =
+ be64_to_cpu(crq->request_capability_rsp.number);
+ }
+
ibmvnic_send_req_caps(adapter, 1);
return;
default:
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4487f1e..3aec421 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1091,6 +1091,8 @@ struct ibmvnic_adapter {
u64 opt_rxba_entries_per_subcrq;
__be64 tx_rx_desc_req;
u8 map_id;
+ u64 num_active_rx_pools;
+ u64 num_active_tx_pools;
struct tasklet_struct tasklet;
enum vnic_state state;
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index d7bdea7..8fd2458 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -331,7 +331,8 @@ struct e1000_adapter {
enum e1000_state_t {
__E1000_TESTING,
__E1000_RESETTING,
- __E1000_DOWN
+ __E1000_DOWN,
+ __E1000_DISABLED
};
#undef pr_fmt
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 1982f79..3dd4aeb 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -945,7 +945,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
- struct e1000_adapter *adapter;
+ struct e1000_adapter *adapter = NULL;
struct e1000_hw *hw;
static int cards_found;
@@ -955,6 +955,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u16 tmp = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
int bars, need_ioport;
+ bool disable_dev = false;
/* do not allocate ioport bars when not needed */
need_ioport = e1000_is_need_ioport(pdev);
@@ -1259,11 +1260,13 @@ err_mdio_ioremap:
iounmap(hw->ce4100_gbe_mdio_base_virt);
iounmap(hw->hw_addr);
err_ioremap:
+ disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
free_netdev(netdev);
err_alloc_etherdev:
pci_release_selected_regions(pdev, bars);
err_pci_reg:
- pci_disable_device(pdev);
+ if (!adapter || disable_dev)
+ pci_disable_device(pdev);
return err;
}
@@ -1281,6 +1284,7 @@ static void e1000_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ bool disable_dev;
e1000_down_and_stop(adapter);
e1000_release_manageability(adapter);
@@ -1299,9 +1303,11 @@ static void e1000_remove(struct pci_dev *pdev)
iounmap(hw->flash_address);
pci_release_selected_regions(pdev, adapter->bars);
+ disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
free_netdev(netdev);
- pci_disable_device(pdev);
+ if (disable_dev)
+ pci_disable_device(pdev);
}
/**
@@ -5156,7 +5162,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (netif_running(netdev))
e1000_free_irq(adapter);
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
+ pci_disable_device(pdev);
return 0;
}
@@ -5200,6 +5207,10 @@ static int e1000_resume(struct pci_dev *pdev)
pr_err("Cannot enable PCI device from suspend\n");
return err;
}
+
+ /* flush memory to make sure state is correct */
+ smp_mb__before_atomic();
+ clear_bit(__E1000_DISABLED, &adapter->flags);
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -5274,7 +5285,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
e1000_down(adapter);
- pci_disable_device(pdev);
+
+ if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
+ pci_disable_device(pdev);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -5302,6 +5315,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
pr_err("Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
+
+ /* flush memory to make sure state is correct */
+ smp_mb__before_atomic();
+ clear_bit(__E1000_DISABLED, &adapter->flags);
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
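The __E1000_DISABLED bit threaded through the hunks above is a single-owner guard: test_and_set_bit() atomically records "disabled" and reports whether this caller was first, so pci_disable_device() runs at most once no matter how remove, shutdown, and the error handlers interleave, while the resume paths clear the bit (after a barrier) to re-arm it. The guard in isolation (kernel-style sketch):

/* disable side: only the first caller actually disables the device */
if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
        pci_disable_device(pdev);

/* enable side: order prior state writes before re-arming the guard */
smp_mb__before_atomic();
clear_bit(__E1000_DISABLED, &adapter->flags);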
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index d6d4ed7..31277d3 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1367,6 +1367,9 @@ out:
* Checks to see if the link status of the hardware has changed. If a
* change in link status has been detected, then we read the PHY registers
* to get the current speed/duplex if link exists.
+ *
+ * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
+ * up).
**/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
@@ -1382,7 +1385,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* Change or Rx Sequence Error interrupt.
*/
if (!mac->get_link_status)
- return 0;
+ return 1;
/* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
@@ -1613,10 +1616,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* different link partner.
*/
ret_val = e1000e_config_fc_after_link_up(hw);
- if (ret_val)
+ if (ret_val) {
e_dbg("Error configuring flow control\n");
+ return ret_val;
+ }
- return ret_val;
+ return 1;
}
static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 7f60522..a434fec 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2463,7 +2463,6 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
return err;
}
-#ifdef CONFIG_PM
/**
* fm10k_resume - Generic PM resume hook
* @dev: generic device structure
@@ -2472,7 +2471,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
* suspend or hibernation. This function does not need to handle lower PCIe
* device state as the stack takes care of that for us.
**/
-static int fm10k_resume(struct device *dev)
+static int __maybe_unused fm10k_resume(struct device *dev)
{
struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
struct net_device *netdev = interface->netdev;
@@ -2499,7 +2498,7 @@ static int fm10k_resume(struct device *dev)
* system suspend or hibernation. This function does not need to handle lower
* PCIe device state as the stack takes care of that for us.
**/
-static int fm10k_suspend(struct device *dev)
+static int __maybe_unused fm10k_suspend(struct device *dev)
{
struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
struct net_device *netdev = interface->netdev;
@@ -2511,8 +2510,6 @@ static int fm10k_suspend(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM */
-
/**
* fm10k_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
@@ -2643,11 +2640,9 @@ static struct pci_driver fm10k_driver = {
.id_table = fm10k_pci_tbl,
.probe = fm10k_probe,
.remove = fm10k_remove,
-#ifdef CONFIG_PM
.driver = {
.pm = &fm10k_pm_ops,
},
-#endif /* CONFIG_PM */
.sriov_configure = fm10k_iov_configure,
.err_handler = &fm10k_err_handler
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 321d8be..af79211 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1573,11 +1573,18 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
else
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
+ /* Copy the address first, so that we avoid a possible race with
+ * .set_rx_mode(). If we copy after changing the address in the filter
+ * list, we might open ourselves to a narrow race window where
+ * .set_rx_mode could delete our dev_addr filter and prevent traffic
+ * from passing.
+ */
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
+
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_del_mac_filter(vsi, netdev->dev_addr);
i40e_add_mac_filter(vsi, addr->sa_data);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
@@ -1923,6 +1930,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ /* Under some circumstances, we might receive a request to delete
+ * our own device address from our uc list. Because we store the
+ * device address in the VSI's MAC/VLAN filter list, we need to ignore
+ * such requests and not delete our device address from this list.
+ */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
i40e_del_mac_filter(vsi, addr);
return 0;
@@ -6038,8 +6053,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
/* Set Bit 7 to be valid */
mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
- /* Set L4type to both TCP and UDP support */
- mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH;
+ /* Set L4type for TCP support */
+ mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
/* Set cloud filter mode */
mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
@@ -6969,18 +6984,18 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
is_valid_ether_addr(filter->src_mac)) ||
(is_multicast_ether_addr(filter->dst_mac) &&
is_multicast_ether_addr(filter->src_mac)))
- return -EINVAL;
+ return -EOPNOTSUPP;
- /* Make sure port is specified, otherwise bail out, for channel
- * specific cloud filter needs 'L4 port' to be non-zero
+ /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
+ * ports are currently not supported via the big buffer.
*/
- if (!filter->dst_port)
- return -EINVAL;
+ if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
+ return -EOPNOTSUPP;
/* adding filter using src_port/src_ip is not supported at this stage */
if (filter->src_port || filter->src_ipv4 ||
!ipv6_addr_any(&filter->ip.v6.src_ip6))
- return -EINVAL;
+ return -EOPNOTSUPP;
/* copy element needed to add cloud filter from filter */
i40e_set_cld_element(filter, &cld_filter.element);
@@ -6991,7 +7006,7 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
is_multicast_ether_addr(filter->src_mac)) {
/* MAC + IP : unsupported mode */
if (filter->dst_ipv4)
- return -EINVAL;
+ return -EOPNOTSUPP;
/* since we validated that L4 port must be valid before
* we get here, start with respective "flags" value
@@ -7356,7 +7371,7 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
if (tc < 0) {
dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
- return -EINVAL;
+ return -EOPNOTSUPP;
}
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
@@ -7490,6 +7505,8 @@ static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
{
struct i40e_vsi *vsi = np->vsi;
+ if (!tc_can_offload(vsi->netdev))
+ return -EOPNOTSUPP;
if (cls_flower->common.chain_index)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 4566d66..5bc2748 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3047,10 +3047,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
- stale = &skb_shinfo(skb)->frags[0];
- for (;;) {
+ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
sum += skb_frag_size(frag++);
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > I40E_MAX_DATA_PER_TXD) {
+ int align_pad = -(stale->page_offset) &
+ (I40E_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ } while (stale_size > I40E_MAX_DATA_PER_TXD);
+ }
+
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
@@ -3058,7 +3078,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
if (!nr_frags--)
break;
- sum -= skb_frag_size(stale++);
+ sum -= stale_size;
}
return false;
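The align_pad computation above uses the standard power-of-two trick: for align a power of two, (-off) & (align - 1) is the number of bytes needed to reach the next align-byte boundary, and 0 when already aligned. A self-contained check:

#include <stdio.h>

static unsigned int align_pad(unsigned int off, unsigned int align)
{
        /* valid only when align is a power of two */
        return -off & (align - 1);
}

int main(void)
{
        printf("%u\n", align_pad(100, 4096));   /* 3996: 100 + 3996 = 4096 */
        printf("%u\n", align_pad(0, 4096));     /* 0: already aligned */
        printf("%u\n", align_pad(8192, 4096));  /* 0: already aligned */
        return 0;
}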
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 50864f9..1ba29bb 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -2012,10 +2012,30 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
- stale = &skb_shinfo(skb)->frags[0];
- for (;;) {
+ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
sum += skb_frag_size(frag++);
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > I40E_MAX_DATA_PER_TXD) {
+ int align_pad = -(stale->page_offset) &
+ (I40E_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+ } while (stale_size > I40E_MAX_DATA_PER_TXD);
+ }
+
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
@@ -2023,7 +2043,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
if (!nr_frags--)
break;
- sum -= skb_frag_size(stale++);
+ sum -= stale_size;
}
return false;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index bc93b69..a539263 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1214,6 +1214,10 @@ static void mvneta_port_disable(struct mvneta_port *pp)
val &= ~MVNETA_GMAC0_PORT_ENABLE;
mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
+ pp->link = 0;
+ pp->duplex = -1;
+ pp->speed = 0;
+
udelay(200);
}
@@ -1958,9 +1962,9 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
if (!mvneta_rxq_desc_is_first_last(rx_status) ||
(rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+ mvneta_rx_error(pp, rx_desc);
err_drop_frame:
dev->stats.rx_errors++;
- mvneta_rx_error(pp, rx_desc);
/* leave the descriptor untouched */
continue;
}
@@ -3011,7 +3015,7 @@ static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
int queue;
- for (queue = 0; queue < txq_number; queue++)
+ for (queue = 0; queue < rxq_number; queue++)
mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 6e423f0..31efc47 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -4081,7 +4081,6 @@ static void skge_remove(struct pci_dev *pdev)
if (hw->ports > 1) {
skge_write32(hw, B0_IMSK, 0);
skge_read32(hw, B0_IMSK);
- free_irq(pdev->irq, hw);
}
spin_unlock_irq(&hw->hw_lock);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 54adfd9..fc67e35 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1961,11 +1961,12 @@ static int mtk_hw_init(struct mtk_eth *eth)
/* set GE2 TUNE */
regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
- /* GE1, Force 1000M/FD, FC ON */
- mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));
-
- /* GE2, Force 1000M/FD, FC ON */
- mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
+ /* Set link-down as the default for each GMAC. Each GMAC's own MCR is
+ * set up with a more appropriate value when mtk_phy_link_adjust() is
+ * invoked.
+ */
+ for (i = 0; i < MTK_MAC_COUNT; i++)
+ mtk_w32(eth, 0, MTK_MAC_MCR(i));
/* Indicates CDM to parse the MTK special tag from CPU
* which also is working out for untag packets.
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index e0eb695..1fa4849 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -188,7 +188,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
struct net_device *dev = mdev->pndev[port];
struct mlx4_en_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
- struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_cmd_mailbox *mailbox, *mailbox_priority;
u64 in_mod = reset << 8 | port;
int err;
int i, counter_index;
@@ -198,6 +198,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
+
+ mailbox_priority = mlx4_alloc_cmd_mailbox(mdev->dev);
+ if (IS_ERR(mailbox_priority)) {
+ mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+ return PTR_ERR(mailbox_priority);
+ }
+
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
@@ -206,6 +213,28 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
mlx4_en_stats = mailbox->buf;
+ memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
+ counter_index = mlx4_get_default_counter_index(mdev->dev, port);
+ err = mlx4_get_counter_stats(mdev->dev, counter_index,
+ &tmp_counter_stats, reset);
+
+ /* 0xffs indicates invalid value */
+ memset(mailbox_priority->buf, 0xff,
+ sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
+
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
+ memset(mailbox_priority->buf, 0,
+ sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
+ err = mlx4_cmd_box(mdev->dev, 0, mailbox_priority->dma,
+ in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
+ 0, MLX4_CMD_DUMP_ETH_STATS,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+ if (err)
+ goto out;
+ }
+
+ flowstats = mailbox_priority->buf;
+
spin_lock_bh(&priv->stats_lock);
mlx4_en_fold_software_stats(dev);
@@ -345,31 +374,6 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan);
priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan);
- spin_unlock_bh(&priv->stats_lock);
-
- memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
- counter_index = mlx4_get_default_counter_index(mdev->dev, port);
- err = mlx4_get_counter_stats(mdev->dev, counter_index,
- &tmp_counter_stats, reset);
-
- /* 0xffs indicates invalid value */
- memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
-
- if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
- memset(mailbox->buf, 0,
- sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
- err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
- in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
- 0, MLX4_CMD_DUMP_ETH_STATS,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
- if (err)
- goto out;
- }
-
- flowstats = mailbox->buf;
-
- spin_lock_bh(&priv->stats_lock);
-
if (tmp_counter_stats.counter_mode == 0) {
priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes);
priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes);
@@ -410,6 +414,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+ mlx4_free_cmd_mailbox(mdev->dev, mailbox_priority);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 88699b1..946d9db 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -185,7 +185,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
if (priv->mdev->dev->caps.flags &
MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
buf[3] = mlx4_en_test_registers(priv);
- if (priv->port_up)
+ if (priv->port_up && dev->mtu >= MLX4_SELFTEST_LB_MIN_MTU)
buf[4] = mlx4_en_test_loopback(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 1856e27..2b72677 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -153,6 +153,9 @@
#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
+#define PREAMBLE_LEN 8
+#define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \
+ ETH_HLEN + PREAMBLE_LEN)
#define MLX4_EN_MIN_MTU 46
/* VLAN_HLEN is added twice, to support skb vlan tagged with multiple
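With the usual values (NET_IP_ALIGN = 2 on most architectures, ETH_HLEN = 14), the new threshold works out to 112 + 2 + 14 + 8 = 136 bytes. A throwaway sketch that evaluates it, using illustrative stand-ins for the kernel definitions:

```c
/* Evaluate the loopback-MTU threshold; constants are stand-ins. */
#include <stdio.h>

#define NET_IP_ALIGN	2
#define ETH_HLEN	14
#define HEADER_COPY_SIZE		(128 - NET_IP_ALIGN)		/* 126 */
#define MLX4_LOOPBACK_TEST_PAYLOAD	(HEADER_COPY_SIZE - ETH_HLEN)	/* 112 */
#define PREAMBLE_LEN	8
#define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \
				  ETH_HLEN + PREAMBLE_LEN)

int main(void)
{
	/* 112 + 2 + 14 + 8 = 136: below this MTU the test is skipped */
	printf("loopback self-test needs MTU >= %d\n", MLX4_SELFTEST_LB_MIN_MTU);
	return 0;
}
```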
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 04304dd..606a0e0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -611,7 +611,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
MLX4_MAX_PORTS;
else
res_alloc->guaranteed[t] = 0;
- res_alloc->res_free -= res_alloc->guaranteed[t];
break;
default:
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 1fffdeb..e9a1fbc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -362,7 +362,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
case MLX5_CMD_OP_ALLOC_Q_COUNTER:
case MLX5_CMD_OP_QUERY_Q_COUNTER:
- case MLX5_CMD_OP_SET_RATE_LIMIT:
+ case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
case MLX5_CMD_OP_QUERY_RATE_LIMIT:
case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
@@ -505,7 +505,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
- MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
+ MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index c0872b3..c2d89bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -82,6 +82,9 @@
max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
+#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
+ (cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
+ MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))
#define MLX5_MPWRQ_LOG_WQE_SZ 18
#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
@@ -590,6 +593,7 @@ struct mlx5e_channel {
struct mlx5_core_dev *mdev;
struct hwtstamp_config *tstamp;
int ix;
+ int cpu;
};
struct mlx5e_channels {
@@ -891,7 +895,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
-void mlx5e_timestamp_set(struct mlx5e_priv *priv);
+void mlx5e_timestamp_init(struct mlx5e_priv *priv);
struct mlx5e_redirect_rqt_param {
bool is_rss;
@@ -935,8 +939,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
u8 cq_period_mode);
-void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params, u8 rq_type);
+void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ u8 rq_type);
static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index c6d90b6..3d46ef4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -274,6 +274,7 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
struct ieee_ets *ets)
{
+ bool have_ets_tc = false;
int bw_sum = 0;
int i;
@@ -288,11 +289,14 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
}
/* Validate Bandwidth Sum */
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+ have_ets_tc = true;
bw_sum += ets->tc_tx_bw[i];
+ }
+ }
- if (bw_sum != 0 && bw_sum != 100) {
+ if (have_ets_tc && bw_sum != 100) {
netdev_err(netdev,
"Failed to validate ETS: BW sum is illegal\n");
return -EINVAL;
@@ -918,8 +922,9 @@ static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
static void mlx5e_ets_init(struct mlx5e_priv *priv)
{
- int i;
struct ieee_ets ets;
+ int err;
+ int i;
if (!MLX5_CAP_GEN(priv->mdev, ets))
return;
@@ -932,11 +937,16 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
ets.prio_tc[i] = i;
}
- /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
- ets.prio_tc[0] = 1;
- ets.prio_tc[1] = 0;
+ if (ets.ets_cap > 1) {
+ /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
+ ets.prio_tc[0] = 1;
+ ets.prio_tc[1] = 0;
+ }
- mlx5e_dcbnl_ieee_setets_core(priv, &ets);
+ err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
+ if (err)
+ netdev_err(priv->netdev,
+ "%s, Failed to init ETS: %d\n", __func__, err);
}
enum {
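The corrected ETS check above only enforces the 100% bandwidth sum once at least one traffic class actually selects ETS, so an all-strict configuration (sum == 0) still passes. A compact sketch of that logic, with stand-in constants for the dcbnl uapi values:

```c
/* Sketch of the have_ets_tc validation fix; constants are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_TCS		8
#define TSA_STRICT	0
#define TSA_ETS		2

static int validate_ets(const int tsa[MAX_TCS], const int bw[MAX_TCS])
{
	bool have_ets_tc = false;
	int bw_sum = 0;

	for (int i = 0; i < MAX_TCS; i++) {
		if (tsa[i] == TSA_ETS) {
			have_ets_tc = true;
			bw_sum += bw[i];
		}
	}

	/* the old "bw_sum != 0 && bw_sum != 100" accepted ETS TCs whose
	 * weights summed to zero; this rejects them */
	return (have_ets_tc && bw_sum != 100) ? -1 : 0;
}

int main(void)
{
	int tsa[MAX_TCS] = { TSA_ETS, TSA_ETS };	/* rest strict */
	int bw[MAX_TCS] = { 0 };			/* zero weights */

	printf("all-zero ETS weights -> %d (now rejected)\n",
	       validate_ets(tsa, bw));
	return 0;
}
```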
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 23425f0..ea5fff2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -207,8 +207,7 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
return;
mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_update_stats(priv, true);
+ mlx5e_update_stats(priv, true);
mutex_unlock(&priv->state_lock);
for (i = 0; i < mlx5e_num_stats_grps; i++)
@@ -1523,8 +1522,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
new_channels.params = priv->channels.params;
MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
- mlx5e_set_rq_type_params(priv->mdev, &new_channels.params,
- new_channels.params.rq_wq_type);
+ new_channels.params.mpwqe_log_stride_sz =
+ MLX5E_MPWQE_STRIDE_SZ(priv->mdev, new_val);
+ new_channels.params.mpwqe_log_num_strides =
+ MLX5_MPWRQ_LOG_WQE_SZ - new_channels.params.mpwqe_log_stride_sz;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
@@ -1536,6 +1537,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
return err;
mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+ mlx5e_dbg(DRV, priv, "MLX5E: RxCqeCmprss was turned %s\n",
+ MLX5E_GET_PFLAG(&priv->channels.params,
+ MLX5E_PFLAG_RX_CQE_COMPRESS) ? "ON" : "OFF");
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d2b057a..d8aefee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -71,11 +71,6 @@ struct mlx5e_channel_param {
struct mlx5e_cq_param icosq_cq;
};
-static int mlx5e_get_node(struct mlx5e_priv *priv, int ix)
-{
- return pci_irq_get_node(priv->mdev->pdev, MLX5_EQ_VEC_COMP_BASE + ix);
-}
-
static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_GEN(mdev, striding_rq) &&
@@ -83,8 +78,8 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, reg_umr_sq);
}
-void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params, u8 rq_type)
+void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params, u8 rq_type)
{
params->rq_wq_type = rq_type;
params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
@@ -93,10 +88,8 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
params->log_rq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
- params->mpwqe_log_stride_sz =
- MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
- MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) :
- MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
+ params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev,
+ MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
params->mpwqe_log_stride_sz;
break;
@@ -120,13 +113,14 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}
-static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
{
u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
!params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
MLX5_WQ_TYPE_LINKED_LIST;
- mlx5e_set_rq_type_params(mdev, params, rq_type);
+ mlx5e_init_rq_type_params(mdev, params, rq_type);
}
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@@ -444,17 +438,16 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
int mtt_sz = mlx5e_get_wqe_mtt_sz();
int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
- int node = mlx5e_get_node(c->priv, c->ix);
int i;
rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
- GFP_KERNEL, node);
+ GFP_KERNEL, cpu_to_node(c->cpu));
if (!rq->mpwqe.info)
goto err_out;
/* We allocate more than mtt_sz as we will align the pointer */
- rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz,
- GFP_KERNEL, node);
+ rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
+ cpu_to_node(c->cpu));
if (unlikely(!rq->mpwqe.mtt_no_align))
goto err_free_wqe_info;
@@ -562,7 +555,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
int err;
int i;
- rqp->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+ rqp->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
&rq->wq_ctrl);
@@ -629,8 +622,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
default: /* MLX5_WQ_TYPE_LINKED_LIST */
rq->wqe.frag_info =
kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
- GFP_KERNEL,
- mlx5e_get_node(c->priv, c->ix));
+ GFP_KERNEL, cpu_to_node(c->cpu));
if (!rq->wqe.frag_info) {
err = -ENOMEM;
goto err_rq_wq_destroy;
@@ -1000,13 +992,13 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
- param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
if (err)
return err;
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
- err = mlx5e_alloc_xdpsq_db(sq, mlx5e_get_node(c->priv, c->ix));
+ err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
if (err)
goto err_sq_wq_destroy;
@@ -1053,13 +1045,13 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.bfreg.map;
- param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
if (err)
return err;
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
- err = mlx5e_alloc_icosq_db(sq, mlx5e_get_node(c->priv, c->ix));
+ err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
if (err)
goto err_sq_wq_destroy;
@@ -1126,13 +1118,13 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
- param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
if (err)
return err;
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
- err = mlx5e_alloc_txqsq_db(sq, mlx5e_get_node(c->priv, c->ix));
+ err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
if (err)
goto err_sq_wq_destroy;
@@ -1504,8 +1496,8 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c,
struct mlx5_core_dev *mdev = c->priv->mdev;
int err;
- param->wq.buf_numa_node = mlx5e_get_node(c->priv, c->ix);
- param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+ param->wq.buf_numa_node = cpu_to_node(c->cpu);
+ param->wq.db_numa_node = cpu_to_node(c->cpu);
param->eq_ix = c->ix;
err = mlx5e_alloc_cq_common(mdev, param, cq);
@@ -1604,6 +1596,11 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
mlx5e_free_cq(cq);
}
+static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
+{
+ return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+}
+
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
@@ -1752,12 +1749,13 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
{
struct mlx5e_cq_moder icocq_moder = {0, 0};
struct net_device *netdev = priv->netdev;
+ int cpu = mlx5e_get_cpu(priv, ix);
struct mlx5e_channel *c;
unsigned int irq;
int err;
int eqn;
- c = kzalloc_node(sizeof(*c), GFP_KERNEL, mlx5e_get_node(priv, ix));
+ c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
return -ENOMEM;
@@ -1765,6 +1763,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->mdev = priv->mdev;
c->tstamp = &priv->tstamp;
c->ix = ix;
+ c->cpu = cpu;
c->pdev = &priv->mdev->pdev->dev;
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
@@ -1853,8 +1852,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->sq[tc]);
mlx5e_activate_rq(&c->rq);
- netif_set_xps_queue(c->netdev,
- mlx5_get_vector_affinity(c->priv->mdev, c->ix), c->ix);
+ netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
}
static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -2671,7 +2669,7 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
netif_carrier_on(netdev);
}
-void mlx5e_timestamp_set(struct mlx5e_priv *priv)
+void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
@@ -2692,7 +2690,6 @@ int mlx5e_open_locked(struct net_device *netdev)
mlx5e_activate_priv_channels(priv);
if (priv->profile->update_carrier)
priv->profile->update_carrier(priv);
- mlx5e_timestamp_set(priv);
if (priv->profile->update_stats)
queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
@@ -3221,12 +3218,12 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
return 0;
}
-#define MLX5E_SET_FEATURE(netdev, feature, enable) \
+#define MLX5E_SET_FEATURE(features, feature, enable) \
do { \
if (enable) \
- netdev->features |= feature; \
+ *features |= feature; \
else \
- netdev->features &= ~feature; \
+ *features &= ~feature; \
} while (0)
typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
@@ -3349,6 +3346,7 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
#endif
static int mlx5e_handle_feature(struct net_device *netdev,
+ netdev_features_t *features,
netdev_features_t wanted_features,
netdev_features_t feature,
mlx5e_feature_handler feature_handler)
@@ -3367,34 +3365,40 @@ static int mlx5e_handle_feature(struct net_device *netdev,
return err;
}
- MLX5E_SET_FEATURE(netdev, feature, enable);
+ MLX5E_SET_FEATURE(features, feature, enable);
return 0;
}
static int mlx5e_set_features(struct net_device *netdev,
netdev_features_t features)
{
+ netdev_features_t oper_features = netdev->features;
int err;
- err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
- set_feature_lro);
- err |= mlx5e_handle_feature(netdev, features,
+ err = mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_LRO, set_feature_lro);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
- set_feature_tc_num_filters);
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
- set_feature_rx_all);
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS,
- set_feature_rx_fcs);
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
- set_feature_rx_vlan);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_HW_TC, set_feature_tc_num_filters);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_RXALL, set_feature_rx_all);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_RXFCS, set_feature_rx_fcs);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
- err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
- set_feature_arfs);
+ err |= mlx5e_handle_feature(netdev, &oper_features, features,
+ NETIF_F_NTUPLE, set_feature_arfs);
#endif
- return err ? -EINVAL : 0;
+ if (err) {
+ netdev->features = oper_features;
+ return -EINVAL;
+ }
+
+ return 0;
}
static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
@@ -3679,6 +3683,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
struct sk_buff *skb,
netdev_features_t features)
{
+ unsigned int offset = 0;
struct udphdr *udph;
u8 proto;
u16 port;
@@ -3688,7 +3693,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
proto = ip_hdr(skb)->protocol;
break;
case htons(ETH_P_IPV6):
- proto = ipv6_hdr(skb)->nexthdr;
+ proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
break;
default:
goto out;
@@ -4140,6 +4145,8 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+
+ mlx5e_timestamp_init(priv);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
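Among the en_main.c changes above, mlx5e_set_features() now applies each handler against a local copy of the feature word and, on failure, commits the partial result so netdev->features reflects what actually took effect. A userspace sketch of that commit-what-stuck pattern, with invented feature bits and handlers:

```c
/* Sketch of toggling features on a local copy; names are invented. */
#include <stdio.h>

typedef unsigned long features_t;

#define F_LRO (1UL << 0)
#define F_TC  (1UL << 1)

static int handle_feature(features_t *oper, features_t wanted, features_t bit,
			  int (*handler)(int enable))
{
	int enable = !!(wanted & bit);
	int err;

	if (!((wanted ^ *oper) & bit))	/* no change requested */
		return 0;

	err = handler(enable);
	if (err)
		return err;

	if (enable)
		*oper |= bit;
	else
		*oper &= ~bit;
	return 0;
}

static int set_lro(int enable) { return 0; }	/* succeeds */
static int set_tc(int enable)  { return -1; }	/* fails */

int main(void)
{
	features_t dev_features = 0, oper = dev_features;
	features_t wanted = F_LRO | F_TC;
	int err;

	err  = handle_feature(&oper, wanted, F_LRO, set_lro);
	err |= handle_feature(&oper, wanted, F_TC, set_tc);
	if (err)
		dev_features = oper;	/* keep only what actually stuck */

	printf("features = %#lx (LRO kept despite TC failure)\n", dev_features);
	return 0;
}
```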
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2c43606..3409d86 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -877,6 +877,8 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
mlx5e_build_rep_params(mdev, &priv->channels.params);
mlx5e_build_rep_netdev(netdev);
+
+ mlx5e_timestamp_init(priv);
}
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index e401d9d..b69a705 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -201,9 +201,15 @@ static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
MLX5E_AM_STATS_WORSE;
+ if (!prev->ppms)
+ return curr->ppms ? MLX5E_AM_STATS_BETTER :
+ MLX5E_AM_STATS_SAME;
+
if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
MLX5E_AM_STATS_WORSE;
+ if (!prev->epms)
+ return MLX5E_AM_STATS_SAME;
if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
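The en_rx_am.c guards above matter because the significance test divides by the previous sample, so a zero previous rate must short-circuit before the comparison. A sketch of that guard; the macro shape and 10% threshold are illustrative, not the exact en_rx_am.c definition:

```c
/* Sketch of the divide-by-zero guard in the AM stats comparison. */
#include <stdio.h>

#define AM_STATS_BETTER	 1
#define AM_STATS_SAME	 0
#define AM_STATS_WORSE	-1

/* "significant" when the delta exceeds 10% of prev; divides by prev */
#define IS_SIGNIFICANT_DIFF(curr, prev) \
	((((curr) > (prev) ? (curr) - (prev) : (prev) - (curr)) * 100 / (prev)) > 10)

static int stats_compare(unsigned long curr_ppms, unsigned long prev_ppms)
{
	if (!prev_ppms)			/* guard: no divide-by-zero below */
		return curr_ppms ? AM_STATS_BETTER : AM_STATS_SAME;

	if (IS_SIGNIFICANT_DIFF(curr_ppms, prev_ppms))
		return curr_ppms > prev_ppms ? AM_STATS_BETTER
					     : AM_STATS_WORSE;
	return AM_STATS_SAME;
}

int main(void)
{
	printf("prev=0 curr=100 -> %d (better, no crash)\n",
	       stats_compare(100, 0));
	printf("prev=100 curr=95 -> %d (insignificant, same)\n",
	       stats_compare(95, 100));
	return 0;
}
```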
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 1f1f8af..5a46082 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -238,15 +238,19 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
int err = 0;
/* Temporarily enable local_lb */
- if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
- mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
- if (!lbtp->local_lb)
- mlx5_nic_vport_update_local_lb(priv->mdev, true);
+ err = mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
+ if (err)
+ return err;
+
+ if (!lbtp->local_lb) {
+ err = mlx5_nic_vport_update_local_lb(priv->mdev, true);
+ if (err)
+ return err;
}
err = mlx5e_refresh_tirs(priv, true);
if (err)
- return err;
+ goto out;
lbtp->loopback_ok = false;
init_completion(&lbtp->comp);
@@ -256,16 +260,21 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
lbtp->pt.dev = priv->netdev;
lbtp->pt.af_packet_priv = lbtp;
dev_add_pack(&lbtp->pt);
+
+ return 0;
+
+out:
+ if (!lbtp->local_lb)
+ mlx5_nic_vport_update_local_lb(priv->mdev, false);
+
return err;
}
static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
struct mlx5e_lbt_priv *lbtp)
{
- if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) {
- if (!lbtp->local_lb)
- mlx5_nic_vport_update_local_lb(priv->mdev, false);
- }
+ if (!lbtp->local_lb)
+ mlx5_nic_vport_update_local_lb(priv->mdev, false);
dev_remove_pack(&lbtp->pt);
mlx5e_refresh_tirs(priv, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 6077186..e7e7cef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -466,7 +466,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
break;
case MLX5_EVENT_TYPE_CQ_ERROR:
cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
- mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n",
+ mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
cqn, eqe->data.cq_err.syndrome);
mlx5_cq_event(dev, cqn, eqe->type);
break;
@@ -775,7 +775,7 @@ err1:
return err;
}
-int mlx5_stop_eqs(struct mlx5_core_dev *dev)
+void mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
int err;
@@ -784,22 +784,26 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, pg)) {
err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
if (err)
- return err;
+ mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n",
+ err);
}
#endif
err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
if (err)
- return err;
+ mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
+ err);
- mlx5_destroy_unmap_eq(dev, &table->async_eq);
+ err = mlx5_destroy_unmap_eq(dev, &table->async_eq);
+ if (err)
+ mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
+ err);
mlx5_cmd_use_polling(dev);
err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
if (err)
- mlx5_cmd_use_events(dev);
-
- return err;
+ mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
+ err);
}
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
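The mlx5_stop_eqs() change converts early returns into log-and-continue, so every EQ is torn down even when one destroy fails. A trivial sketch of that shutdown-path policy, with a stand-in queue list:

```c
/* Sketch of log-and-continue teardown; destroy_eq() is a stand-in. */
#include <stdio.h>

static int destroy_eq(int i)
{
	return i == 1 ? -5 : 0;		/* simulate one failing destroy */
}

static void stop_eqs(void)
{
	int i, err;

	for (i = 0; i < 4; i++) {
		err = destroy_eq(i);
		if (err)		/* log and keep going; no early return */
			fprintf(stderr, "failed to destroy eq %d, err(%d)\n",
				i, err);
	}
}

int main(void)
{
	stop_eqs();
	return 0;
}
```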
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
index 3c11d6e..1496296 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
@@ -66,6 +66,9 @@ static int mlx5_fpga_mem_read_i2c(struct mlx5_fpga_device *fdev, size_t size,
u8 actual_size;
int err;
+ if (!size)
+ return -EINVAL;
+
if (!fdev->mdev)
return -ENOTCONN;
@@ -95,6 +98,9 @@ static int mlx5_fpga_mem_write_i2c(struct mlx5_fpga_device *fdev, size_t size,
u8 actual_size;
int err;
+ if (!size)
+ return -EINVAL;
+
if (!fdev->mdev)
return -ENOTCONN;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index c70fd66..dfaad9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -174,6 +174,8 @@ static void del_hw_fte(struct fs_node *node);
static void del_sw_flow_table(struct fs_node *node);
static void del_sw_flow_group(struct fs_node *node);
static void del_sw_fte(struct fs_node *node);
+static void del_sw_prio(struct fs_node *node);
+static void del_sw_ns(struct fs_node *node);
/* Delete rule (destination) is special case that
* requires to lock the FTE for all the deletion process.
*/
@@ -408,6 +410,16 @@ static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
return NULL;
}
+static void del_sw_ns(struct fs_node *node)
+{
+ kfree(node);
+}
+
+static void del_sw_prio(struct fs_node *node)
+{
+ kfree(node);
+}
+
static void del_hw_flow_table(struct fs_node *node)
{
struct mlx5_flow_table *ft;
@@ -2064,7 +2076,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
return ERR_PTR(-ENOMEM);
fs_prio->node.type = FS_TYPE_PRIO;
- tree_init_node(&fs_prio->node, NULL, NULL);
+ tree_init_node(&fs_prio->node, NULL, del_sw_prio);
tree_add_node(&fs_prio->node, &ns->node);
fs_prio->num_levels = num_levels;
fs_prio->prio = prio;
@@ -2090,7 +2102,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
return ERR_PTR(-ENOMEM);
fs_init_namespace(ns);
- tree_init_node(&ns->node, NULL, NULL);
+ tree_init_node(&ns->node, NULL, del_sw_ns);
tree_add_node(&ns->node, &prio->node);
list_add_tail(&ns->node.list, &prio->node.children);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 1a0e797a..21d29f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -241,7 +241,7 @@ static void print_health_info(struct mlx5_core_dev *dev)
u32 fw;
int i;
- /* If the syndrom is 0, the device is OK and no need to print buffer */
+ /* If the syndrome is 0, the device is OK and no need to print buffer */
if (!ioread8(&h->synd))
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index d2a66dc..ee2f378 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -57,7 +57,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
/* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
- mlx5e_set_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
+ mlx5e_init_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
/* RQ size in ipoib by default is 512 */
params->log_rq_size = is_kdump_kernel() ?
@@ -86,6 +86,8 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
mlx5i_build_nic_params(mdev, &priv->channels.params);
+ mlx5e_timestamp_init(priv);
+
/* netdev init */
netdev->hw_features |= NETIF_F_SG;
netdev->hw_features |= NETIF_F_IP_CSUM;
@@ -450,7 +452,6 @@ static int mlx5i_open(struct net_device *netdev)
mlx5e_refresh_tirs(epriv, false);
mlx5e_activate_priv_channels(epriv);
- mlx5e_timestamp_set(epriv);
mutex_unlock(&epriv->state_lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index f26f97f..582b2f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -137,6 +137,17 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
+static int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
+ bool reset, void *out, int out_size)
+{
+ u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { };
+
+ MLX5_SET(query_cong_statistics_in, in, opcode,
+ MLX5_CMD_OP_QUERY_CONG_STATISTICS);
+ MLX5_SET(query_cong_statistics_in, in, clear, reset);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
+}
+
static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev)
{
return dev->priv.lag;
@@ -633,3 +644,48 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
/* If bonded, we do not add an IB device for PF1. */
return false;
}
+
+int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
+ u64 *values,
+ int num_counters,
+ size_t *offsets)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
+ struct mlx5_core_dev *mdev[MLX5_MAX_PORTS];
+ struct mlx5_lag *ldev;
+ int num_ports;
+ int ret, i, j;
+ void *out;
+
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ memset(values, 0, sizeof(*values) * num_counters);
+
+ mutex_lock(&lag_mutex);
+ ldev = mlx5_lag_dev_get(dev);
+ if (ldev && mlx5_lag_is_bonded(ldev)) {
+ num_ports = MLX5_MAX_PORTS;
+ mdev[0] = ldev->pf[0].dev;
+ mdev[1] = ldev->pf[1].dev;
+ } else {
+ num_ports = 1;
+ mdev[0] = dev;
+ }
+
+ for (i = 0; i < num_ports; ++i) {
+ ret = mlx5_cmd_query_cong_counter(mdev[i], false, out, outlen);
+ if (ret)
+ goto unlock;
+
+ for (j = 0; j < num_counters; ++j)
+ values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
+ }
+
+unlock:
+ mutex_unlock(&lag_mutex);
+ kvfree(out);
+ return ret;
+}
+EXPORT_SYMBOL(mlx5_lag_query_cong_counters);
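mlx5_lag_query_cong_counters() above sums each requested counter across the (up to two) bonded ports, reading it from a caller-supplied byte offset into the query response. A stubbed sketch of that aggregation loop, with the command layer and endian conversion faked:

```c
/* Sketch of per-port counter aggregation; the query is stubbed. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NUM_PORTS 2
#define OUTLEN    64

static void query_port(int port, uint8_t out[OUTLEN])
{
	uint64_t pkts = 1000 * (port + 1);	/* stand-in firmware reply */

	memset(out, 0, OUTLEN);
	memcpy(out + 8, &pkts, sizeof(pkts));	/* counter lives at offset 8 */
}

int main(void)
{
	size_t offsets[1] = { 8 };
	uint64_t values[1] = { 0 };
	uint8_t out[OUTLEN];
	int i, j;

	for (i = 0; i < NUM_PORTS; i++) {
		query_port(i, out);
		for (j = 0; j < 1; j++) {
			uint64_t v;

			memcpy(&v, out + offsets[j], sizeof(v));
			values[j] += v;		/* sum over both LAG ports */
		}
	}
	printf("aggregated counter = %llu\n", (unsigned long long)values[0]);
	return 0;
}
```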
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index fa8aed6..5701f12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -423,9 +423,13 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
+ ptp_event.index = pin;
+ ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
+ be64_to_cpu(eqe->data.pps.time_stamp));
if (clock->pps_info.enabled) {
ptp_event.type = PTP_CLOCK_PPSUSR;
- ptp_event.pps_times.ts_real = ns_to_timespec64(eqe->data.pps.time_stamp);
+ ptp_event.pps_times.ts_real =
+ ns_to_timespec64(ptp_event.timestamp);
} else {
ptp_event.type = PTP_CLOCK_EXTTS;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 5f32344..0f88fd3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -317,11 +317,9 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
struct mlx5_eq_table *table = &priv->eq_table;
- struct irq_affinity irqdesc = {
- .pre_vectors = MLX5_EQ_VEC_COMP_BASE,
- };
int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
int nvec;
+ int err;
nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
MLX5_EQ_VEC_COMP_BASE;
@@ -331,22 +329,23 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
if (!priv->irq_info)
- goto err_free_msix;
+ return -ENOMEM;
- nvec = pci_alloc_irq_vectors_affinity(dev->pdev,
+ nvec = pci_alloc_irq_vectors(dev->pdev,
MLX5_EQ_VEC_COMP_BASE + 1, nvec,
- PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
- &irqdesc);
- if (nvec < 0)
- return nvec;
+ PCI_IRQ_MSIX);
+ if (nvec < 0) {
+ err = nvec;
+ goto err_free_irq_info;
+ }
table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
return 0;
-err_free_msix:
+err_free_irq_info:
kfree(priv->irq_info);
- return -ENOMEM;
+ return err;
}
static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev)
@@ -582,8 +581,7 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
int ret = 0;
/* Disable local_lb by default */
- if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
- MLX5_CAP_GEN(dev, disable_local_lb))
+ if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
ret = mlx5_nic_vport_update_local_lb(dev, false);
return ret;
@@ -622,6 +620,63 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
return (u64)timer_l | (u64)timer_h1 << 32;
}
+static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+
+ if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+ mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
+ return -ENOMEM;
+ }
+
+ cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
+ priv->irq_info[i].mask);
+
+ if (IS_ENABLED(CONFIG_SMP) &&
+ irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+ mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
+
+ return 0;
+}
+
+static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+
+ irq_set_affinity_hint(irq, NULL);
+ free_cpumask_var(priv->irq_info[i].mask);
+}
+
+static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
+ err = mlx5_irq_set_affinity_hint(mdev, i);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ for (i--; i >= 0; i--)
+ mlx5_irq_clear_affinity_hint(mdev, i);
+
+ return err;
+}
+
+static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int i;
+
+ for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
+ mlx5_irq_clear_affinity_hint(mdev, i);
+}
+
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn)
{
@@ -1068,9 +1123,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_stop_poll;
}
- if (boot && mlx5_init_once(dev, priv)) {
- dev_err(&pdev->dev, "sw objs init failed\n");
- goto err_stop_poll;
+ if (boot) {
+ err = mlx5_init_once(dev, priv);
+ if (err) {
+ dev_err(&pdev->dev, "sw objs init failed\n");
+ goto err_stop_poll;
+ }
}
err = mlx5_alloc_irq_vectors(dev);
@@ -1080,8 +1138,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
}
dev->priv.uar = mlx5_get_uars_page(dev);
- if (!dev->priv.uar) {
+ if (IS_ERR(dev->priv.uar)) {
dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
+ err = PTR_ERR(dev->priv.uar);
goto err_disable_msix;
}
@@ -1097,6 +1156,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_stop_eqs;
}
+ err = mlx5_irq_set_affinity_hints(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
+ goto err_affinity_hints;
+ }
+
err = mlx5_init_fs(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init flow steering\n");
@@ -1154,6 +1219,9 @@ err_sriov:
mlx5_cleanup_fs(dev);
err_fs:
+ mlx5_irq_clear_affinity_hints(dev);
+
+err_affinity_hints:
free_comp_eqs(dev);
err_stop_eqs:
@@ -1222,6 +1290,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_sriov_detach(dev);
mlx5_cleanup_fs(dev);
+ mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_put_uars_page(dev, priv->uar);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index db9e665..889130e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -213,8 +213,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
err_cmd:
memset(din, 0, sizeof(din));
memset(dout, 0, sizeof(dout));
- MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
- MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+ MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index e651e4c..d3c33e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -125,16 +125,16 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
return ret_entry;
}
-static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
+static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
u32 rate, u16 index)
{
- u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
- MLX5_SET(set_rate_limit_in, in, opcode,
- MLX5_CMD_OP_SET_RATE_LIMIT);
- MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
- MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
+ MLX5_SET(set_pp_rate_limit_in, in, opcode,
+ MLX5_CMD_OP_SET_PP_RATE_LIMIT);
+ MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
+ MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rate);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
@@ -173,7 +173,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
entry->refcount++;
} else {
/* new rate limit */
- err = mlx5_set_rate_limit_cmd(dev, rate, entry->index);
+ err = mlx5_set_pp_rate_limit_cmd(dev, rate, entry->index);
if (err) {
mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
rate, err);
@@ -209,7 +209,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)
entry->refcount--;
if (!entry->refcount) {
/* need to remove rate */
- mlx5_set_rate_limit_cmd(dev, 0, entry->index);
+ mlx5_set_pp_rate_limit_cmd(dev, 0, entry->index);
entry->rate = 0;
}
@@ -262,8 +262,8 @@ void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
/* Clear all configured rates */
for (i = 0; i < table->max_size; i++)
if (table->rl_entry[i].rate)
- mlx5_set_rate_limit_cmd(dev, 0,
- table->rl_entry[i].index);
+ mlx5_set_pp_rate_limit_cmd(dev, 0,
+ table->rl_entry[i].index);
kfree(dev->priv.rl_table.rl_entry);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 222b259..8b97066 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -168,18 +168,16 @@ struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
struct mlx5_uars_page *ret;
mutex_lock(&mdev->priv.bfregs.reg_head.lock);
- if (list_empty(&mdev->priv.bfregs.reg_head.list)) {
- ret = alloc_uars_page(mdev, false);
- if (IS_ERR(ret)) {
- ret = NULL;
- goto out;
- }
- list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
- } else {
+ if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
struct mlx5_uars_page, list);
kref_get(&ret->ref_count);
+ goto out;
}
+ ret = alloc_uars_page(mdev, false);
+ if (IS_ERR(ret))
+ goto out;
+ list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
out:
mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
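The reworked mlx5_get_uars_page() takes a reference on the list head when one exists and only allocates otherwise, propagating the allocator's error pointer instead of NULL. A single-threaded sketch of that get-or-alloc shape, with a plain counter standing in for the kref and NULL standing in for ERR_PTR:

```c
/* Sketch of the get-or-allocate flow; refcounting is simplified. */
#include <stdio.h>
#include <stdlib.h>

struct page { int refs; };

static struct page *head;	/* stand-in for the bfregs list head */

static struct page *get_page(void)
{
	struct page *p;

	if (head) {			/* fast path: reuse + ref bump */
		head->refs++;
		return head;
	}
	p = calloc(1, sizeof(*p));
	if (!p)
		return NULL;		/* stands in for ERR_PTR(-ENOMEM) */
	p->refs = 1;
	head = p;			/* publish for later callers */
	return p;
}

int main(void)
{
	struct page *a = get_page(), *b = get_page();

	printf("same page: %d, refs: %d\n", a == b, a ? a->refs : 0);
	return 0;
}
```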
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index d653b00..a1296a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -908,23 +908,33 @@ int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
void *in;
int err;
- mlx5_core_dbg(mdev, "%s local_lb\n", enable ? "enable" : "disable");
+ if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
+ !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
+ return 0;
+
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(modify_nic_vport_context_in, in,
- field_select.disable_mc_local_lb, 1);
- MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.disable_mc_local_lb, !enable);
-
- MLX5_SET(modify_nic_vport_context_in, in,
- field_select.disable_uc_local_lb, 1);
MLX5_SET(modify_nic_vport_context_in, in,
nic_vport_context.disable_uc_local_lb, !enable);
+ if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.disable_mc_local_lb, 1);
+
+ if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.disable_uc_local_lb, 1);
+
err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ if (!err)
+ mlx5_core_dbg(mdev, "%s local_lb\n",
+ enable ? "enable" : "disable");
+
kvfree(in);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index 07a9ba6..2f74953 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -71,9 +71,9 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
struct mlx5e_vxlan *vxlan;
- spin_lock(&vxlan_db->lock);
+ spin_lock_bh(&vxlan_db->lock);
vxlan = radix_tree_lookup(&vxlan_db->tree, port);
- spin_unlock(&vxlan_db->lock);
+ spin_unlock_bh(&vxlan_db->lock);
return vxlan;
}
@@ -88,8 +88,12 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
struct mlx5e_vxlan *vxlan;
int err;
- if (mlx5e_vxlan_lookup_port(priv, port))
+ mutex_lock(&priv->state_lock);
+ vxlan = mlx5e_vxlan_lookup_port(priv, port);
+ if (vxlan) {
+ atomic_inc(&vxlan->refcount);
goto free_work;
+ }
if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
goto free_work;
@@ -99,10 +103,11 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
goto err_delete_port;
vxlan->udp_port = port;
+ atomic_set(&vxlan->refcount, 1);
- spin_lock_irq(&vxlan_db->lock);
+ spin_lock_bh(&vxlan_db->lock);
err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
- spin_unlock_irq(&vxlan_db->lock);
+ spin_unlock_bh(&vxlan_db->lock);
if (err)
goto err_free;
@@ -113,35 +118,39 @@ err_free:
err_delete_port:
mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
free_work:
+ mutex_unlock(&priv->state_lock);
kfree(vxlan_work);
}
-static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_del_port(struct work_struct *work)
{
+ struct mlx5e_vxlan_work *vxlan_work =
+ container_of(work, struct mlx5e_vxlan_work, work);
+ struct mlx5e_priv *priv = vxlan_work->priv;
struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+ u16 port = vxlan_work->port;
struct mlx5e_vxlan *vxlan;
+ bool remove = false;
- spin_lock_irq(&vxlan_db->lock);
- vxlan = radix_tree_delete(&vxlan_db->tree, port);
- spin_unlock_irq(&vxlan_db->lock);
-
+ mutex_lock(&priv->state_lock);
+ spin_lock_bh(&vxlan_db->lock);
+ vxlan = radix_tree_lookup(&vxlan_db->tree, port);
if (!vxlan)
- return;
-
- mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port);
-
- kfree(vxlan);
-}
+ goto out_unlock;
-static void mlx5e_vxlan_del_port(struct work_struct *work)
-{
- struct mlx5e_vxlan_work *vxlan_work =
- container_of(work, struct mlx5e_vxlan_work, work);
- struct mlx5e_priv *priv = vxlan_work->priv;
- u16 port = vxlan_work->port;
+ if (atomic_dec_and_test(&vxlan->refcount)) {
+ radix_tree_delete(&vxlan_db->tree, port);
+ remove = true;
+ }
- __mlx5e_vxlan_core_del_port(priv, port);
+out_unlock:
+ spin_unlock_bh(&vxlan_db->lock);
+ if (remove) {
+ mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+ kfree(vxlan);
+ }
+ mutex_unlock(&priv->state_lock);
kfree(vxlan_work);
}
@@ -171,12 +180,11 @@ void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
struct mlx5e_vxlan *vxlan;
unsigned int port = 0;
- spin_lock_irq(&vxlan_db->lock);
+ /* Lockless since we are the only radix-tree consumers, wq is disabled */
while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
port = vxlan->udp_port;
- spin_unlock_irq(&vxlan_db->lock);
- __mlx5e_vxlan_core_del_port(priv, (u16)port);
- spin_lock_irq(&vxlan_db->lock);
+ radix_tree_delete(&vxlan_db->tree, port);
+ mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+ kfree(vxlan);
}
- spin_unlock_irq(&vxlan_db->lock);
}
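The vxlan.c rework makes port offload refcounted: a duplicate add only bumps the count, and the hardware delete command runs once the last user goes away. A small sketch of that lifetime, with a fixed array standing in for the radix tree and printf standing in for the firmware commands:

```c
/* Sketch of refcounted VXLAN port add/delete; table is a stand-in. */
#include <stdio.h>
#include <stdlib.h>

struct vxlan { unsigned short port; int refcount; };

static struct vxlan *table[8];

static struct vxlan *lookup(unsigned short port)
{
	for (int i = 0; i < 8; i++)
		if (table[i] && table[i]->port == port)
			return table[i];
	return NULL;
}

static void add_port(unsigned short port)
{
	struct vxlan *v = lookup(port);

	if (v) {
		v->refcount++;		/* already offloaded: just take a ref */
		return;
	}
	v = calloc(1, sizeof(*v));
	if (!v)
		return;
	v->port = port;
	v->refcount = 1;
	for (int i = 0; i < 8; i++)
		if (!table[i]) { table[i] = v; break; }
	printf("hw add port %u\n", port);
}

static void del_port(unsigned short port)
{
	struct vxlan *v = lookup(port);

	if (!v)
		return;
	if (--v->refcount)
		return;			/* still in use elsewhere */
	for (int i = 0; i < 8; i++)
		if (table[i] == v)
			table[i] = NULL;
	printf("hw del port %u\n", port);
	free(v);
}

int main(void)
{
	add_port(4789);
	add_port(4789);		/* second add: refcount only */
	del_port(4789);		/* no hw delete yet */
	del_port(4789);		/* last ref: hw delete */
	return 0;
}
```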
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
index 5def12c..5ef6ae7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
@@ -36,6 +36,7 @@
#include "en.h"
struct mlx5e_vxlan {
+ atomic_t refcount;
u16 udp_port;
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 23f7d82..6ef20e5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1643,7 +1643,12 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
return 0;
}
- wmb(); /* reset needs to be written before we read control register */
+ /* Reset needs to be written before we read control register, and
+ * we must wait for the HW to become responsive once again
+ */
+ wmb();
+ msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
+
end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
do {
u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index a644120..fb082ad2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -59,6 +59,7 @@
#define MLXSW_PCI_SW_RESET 0xF0010
#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
+#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
#define MLXSW_PCI_FW_READY 0xA1844
#define MLXSW_PCI_FW_READY_MASK 0xFFFF
#define MLXSW_PCI_FW_READY_MAGIC 0x5E
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 2d0897b..c3837ca 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4300,6 +4300,7 @@ static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ u16 vid = 1;
int err;
err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
@@ -4312,8 +4313,19 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
true, false);
if (err)
goto err_port_vlan_set;
+
+ for (; vid <= VLAN_N_VID - 1; vid++) {
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
+ vid, false);
+ if (err)
+ goto err_vid_learning_set;
+ }
+
return 0;
+err_vid_learning_set:
+ for (vid--; vid >= 1; vid--)
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
@@ -4323,6 +4335,12 @@ err_port_stp_set:
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ u16 vid;
+
+ for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
+ vid, true);
+
mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
false, false);
mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
@@ -4358,7 +4376,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
}
if (!info->linking)
break;
- if (netdev_has_any_upper_dev(upper_dev)) {
+ if (netdev_has_any_upper_dev(upper_dev) &&
+ (!netif_is_bridge_master(upper_dev) ||
+ !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
+ upper_dev))) {
NL_SET_ERR_MSG(extack,
"spectrum: Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
@@ -4486,6 +4507,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct netdev_notifier_changeupper_info *info = ptr;
struct netlink_ext_ack *extack;
struct net_device *upper_dev;
@@ -4502,7 +4524,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
}
if (!info->linking)
break;
- if (netdev_has_any_upper_dev(upper_dev)) {
+ if (netdev_has_any_upper_dev(upper_dev) &&
+ (!netif_is_bridge_master(upper_dev) ||
+ !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
+ upper_dev))) {
NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 432ab9b..05ce1be 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -365,6 +365,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
struct net_device *br_dev);
+bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev);
/* spectrum.c */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index c33beac..b5397da 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -46,7 +46,8 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
int tclass_num, u32 min, u32 max,
u32 probability, bool is_ecn)
{
- char cwtp_cmd[max_t(u8, MLXSW_REG_CWTP_LEN, MLXSW_REG_CWTPM_LEN)];
+ char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
+ char cwtp_cmd[MLXSW_REG_CWTP_LEN];
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
int err;
@@ -60,10 +61,10 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
return err;
- mlxsw_reg_cwtpm_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num,
+ mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtp_cmd);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}
static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 72ef4f8..7042c85 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -821,13 +821,18 @@ static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
int err;
- err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
- if (err)
- return err;
fib->lpm_tree = new_tree;
mlxsw_sp_lpm_tree_hold(new_tree);
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
+ if (err)
+ goto err_tree_bind;
mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
return 0;
+
+err_tree_bind:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
+ fib->lpm_tree = old_tree;
+ return err;
}
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
@@ -868,11 +873,14 @@ err_tree_replace:
return err;
no_replace:
- err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
- if (err)
- return err;
fib->lpm_tree = new_tree;
mlxsw_sp_lpm_tree_hold(new_tree);
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
+ if (err) {
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
+ fib->lpm_tree = NULL;
+ return err;
+ }
return 0;
}
@@ -1934,11 +1942,8 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
dipn = htonl(dip);
dev = mlxsw_sp->router->rifs[rif]->dev;
n = neigh_lookup(&arp_tbl, &dipn, dev);
- if (!n) {
- netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
- &dip);
+ if (!n)
return;
- }
netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
neigh_event_send(n, NULL);
@@ -1965,11 +1970,8 @@ static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
dev = mlxsw_sp->router->rifs[rif]->dev;
n = neigh_lookup(&nd_tbl, &dip, dev);
- if (!n) {
- netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
- &dip);
+ if (!n)
return;
- }
netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
neigh_event_send(n, NULL);
@@ -2436,25 +2438,16 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}
-static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_rif *rif)
-{
- char rauht_pl[MLXSW_REG_RAUHT_LEN];
-
- mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
- rif->rif_index, rif->addr);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
-}
-
static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
- mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
- rif_list_node)
+ rif_list_node) {
+ mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
+ }
}
enum mlxsw_sp_nexthop_type {
@@ -3237,7 +3230,7 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
{
if (!removing)
nh->should_offload = 1;
- else if (nh->offloaded)
+ else
nh->should_offload = 0;
nh->update = 1;
}
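The spectrum_router.c fix reorders the LPM tree replace so the reference and the FIB pointer are taken before the bind, and both are rolled back if the bind fails, leaving no half-updated state. A sketch of that rollback ordering with a deliberately failing bind; names are illustrative:

```c
/* Sketch of hold-then-bind with rollback on bind failure. */
#include <stdio.h>

struct tree { int refs; int id; };

static void hold(struct tree *t) { t->refs++; }
static void put(struct tree *t)  { t->refs--; }
static int bind(int id)          { return -1; }	/* simulate bind failure */

static int replace(struct tree **fib_tree, struct tree *new_tree)
{
	struct tree *old_tree = *fib_tree;
	int err;

	*fib_tree = new_tree;
	hold(new_tree);
	err = bind(new_tree->id);
	if (err) {
		put(new_tree);		/* roll back: reference and pointer */
		*fib_tree = old_tree;
		return err;
	}
	put(old_tree);
	return 0;
}

int main(void)
{
	struct tree oldt = { .refs = 1, .id = 1 };
	struct tree newt = { .refs = 0, .id = 2 };
	struct tree *fib = &oldt;

	printf("replace -> %d, fib id %d, new refs %d\n",
	       replace(&fib, &newt), fib->id, newt.refs);
	return 0;
}
```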
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 7b8548e..593ad31 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -152,6 +152,12 @@ mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
return NULL;
}
+bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev)
+{
+ return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+}
+
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
struct net_device *br_dev)
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index a42433f..b922ab5 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -311,7 +311,7 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
{
struct sonic_local* lp = netdev_priv(dev);
int sr;
- int commslot = 0;
+ bool commslot = macintosh_config->expansion_type == MAC_EXP_PDS_COMM;
if (!MACH_IS_MAC)
return -ENODEV;
@@ -322,10 +322,7 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
Ethernet (BTW, the Ethernet *is* always at the same
address, and nothing else lives there, at least if Apple's
documentation is to be believed) */
- if (macintosh_config->ident == MAC_MODEL_Q630 ||
- macintosh_config->ident == MAC_MODEL_P588 ||
- macintosh_config->ident == MAC_MODEL_P575 ||
- macintosh_config->ident == MAC_MODEL_C610) {
+ if (commslot || macintosh_config->ident == MAC_MODEL_C610) {
int card_present;
card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS);
@@ -333,7 +330,6 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
printk("none.\n");
return -ENODEV;
}
- commslot = 1;
}
printk("yes\n");
@@ -428,26 +424,26 @@ static int mac_nubus_sonic_ethernet_addr(struct net_device *dev,
return 0;
}
-static int macsonic_ident(struct nubus_dev *ndev)
+static int macsonic_ident(struct nubus_rsrc *fres)
{
- if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC &&
- ndev->dr_sw == NUBUS_DRSW_SONIC_LC)
+ if (fres->dr_hw == NUBUS_DRHW_ASANTE_LC &&
+ fres->dr_sw == NUBUS_DRSW_SONIC_LC)
return MACSONIC_DAYNALINK;
- if (ndev->dr_hw == NUBUS_DRHW_SONIC &&
- ndev->dr_sw == NUBUS_DRSW_APPLE) {
+ if (fres->dr_hw == NUBUS_DRHW_SONIC &&
+ fres->dr_sw == NUBUS_DRSW_APPLE) {
/* There has to be a better way to do this... */
- if (strstr(ndev->board->name, "DuoDock"))
+ if (strstr(fres->board->name, "DuoDock"))
return MACSONIC_DUODOCK;
else
return MACSONIC_APPLE;
}
- if (ndev->dr_hw == NUBUS_DRHW_SMC9194 &&
- ndev->dr_sw == NUBUS_DRSW_DAYNA)
+ if (fres->dr_hw == NUBUS_DRHW_SMC9194 &&
+ fres->dr_sw == NUBUS_DRSW_DAYNA)
return MACSONIC_DAYNA;
- if (ndev->dr_hw == NUBUS_DRHW_APPLE_SONIC_LC &&
- ndev->dr_sw == 0) { /* huh? */
+ if (fres->dr_hw == NUBUS_DRHW_APPLE_SONIC_LC &&
+ fres->dr_sw == 0) { /* huh? */
return MACSONIC_APPLE16;
}
return -1;
@@ -456,7 +452,7 @@ static int macsonic_ident(struct nubus_dev *ndev)
static int mac_nubus_sonic_probe(struct net_device *dev)
{
static int slots;
- struct nubus_dev* ndev = NULL;
+ struct nubus_rsrc *ndev = NULL;
struct sonic_local* lp = netdev_priv(dev);
unsigned long base_addr, prom_addr;
u16 sonic_dcr;
@@ -464,9 +460,11 @@ static int mac_nubus_sonic_probe(struct net_device *dev)
int reg_offset, dma_bitmode;
/* Find the first SONIC that hasn't been initialized already */
- while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK,
- NUBUS_TYPE_ETHERNET, ndev)) != NULL)
- {
+ for_each_func_rsrc(ndev) {
+ if (ndev->category != NUBUS_CAT_NETWORK ||
+ ndev->type != NUBUS_TYPE_ETHERNET)
+ continue;
+
/* Have we seen it already? */
if (slots & (1<<ndev->board->slot))
continue;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index e379b78..13190aa 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -82,10 +82,33 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
return nfp_net_ebpf_capable(nn) ? "BPF" : "";
}
+static int
+nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
+{
+ int err;
+
+ nn->app_priv = kzalloc(sizeof(struct nfp_bpf_vnic), GFP_KERNEL);
+ if (!nn->app_priv)
+ return -ENOMEM;
+
+ err = nfp_app_nic_vnic_alloc(app, nn, id);
+ if (err)
+ goto err_free_priv;
+
+ return 0;
+err_free_priv:
+ kfree(nn->app_priv);
+ return err;
+}
+
static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
{
+ struct nfp_bpf_vnic *bv = nn->app_priv;
+
if (nn->dp.bpf_offload_xdp)
nfp_bpf_xdp_offload(app, nn, NULL);
+ WARN_ON(bv->tc_prog);
+ kfree(bv);
}
static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
@@ -93,6 +116,9 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
{
struct tc_cls_bpf_offload *cls_bpf = type_data;
struct nfp_net *nn = cb_priv;
+ struct bpf_prog *oldprog;
+ struct nfp_bpf_vnic *bv;
+ int err;
if (type != TC_SETUP_CLSBPF ||
!tc_can_offload(nn->dp.netdev) ||
@@ -100,8 +126,6 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
cls_bpf->common.protocol != htons(ETH_P_ALL) ||
cls_bpf->common.chain_index)
return -EOPNOTSUPP;
- if (nn->dp.bpf_offload_xdp)
- return -EBUSY;
/* Only support TC direct action */
if (!cls_bpf->exts_integrated ||
@@ -110,16 +134,25 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
return -EOPNOTSUPP;
}
- switch (cls_bpf->command) {
- case TC_CLSBPF_REPLACE:
- return nfp_net_bpf_offload(nn, cls_bpf->prog, true);
- case TC_CLSBPF_ADD:
- return nfp_net_bpf_offload(nn, cls_bpf->prog, false);
- case TC_CLSBPF_DESTROY:
- return nfp_net_bpf_offload(nn, NULL, true);
- default:
+ if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
return -EOPNOTSUPP;
+
+ bv = nn->app_priv;
+ oldprog = cls_bpf->oldprog;
+
+ /* Don't remove if oldprog doesn't match driver's state */
+ if (bv->tc_prog != oldprog) {
+ oldprog = NULL;
+ if (!cls_bpf->prog)
+ return 0;
}
+
+ err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog);
+ if (err)
+ return err;
+
+ bv->tc_prog = cls_bpf->prog;
+ return 0;
}
static int nfp_bpf_setup_tc_block(struct net_device *netdev,
@@ -167,7 +200,7 @@ const struct nfp_app_type app_bpf = {
.extra_cap = nfp_bpf_extra_cap,
- .vnic_alloc = nfp_app_nic_vnic_alloc,
+ .vnic_alloc = nfp_bpf_vnic_alloc,
.vnic_free = nfp_bpf_vnic_free,
.setup_tc = nfp_bpf_setup_tc,
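The command handling above replaces the add/replace/destroy triple with a single TC_CLSBPF_OFFLOAD command plus driver-side bookkeeping: the stack's oldprog is trusted only when it matches what the driver recorded as currently offloaded. A hedged sketch of that bookkeeping in isolation (generic names, not nfp code):

/* Sketch: trust the stack's view of the old program only if it matches
 * the driver's own record of what the hardware runs.
 */
struct bpf_prog;

static int offload_update(struct bpf_prog **tracked,
			  struct bpf_prog *oldprog, struct bpf_prog *prog,
			  int (*hw_swap)(struct bpf_prog *, struct bpf_prog *))
{
	int err;

	if (*tracked != oldprog) {
		oldprog = NULL;		/* stale view: replace nothing */
		if (!prog)
			return 0;	/* nothing of ours to destroy */
	}

	err = hw_swap(prog, oldprog);
	if (err)
		return err;

	*tracked = prog;		/* remember what the HW now runs */
	return 0;
}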
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 082a15f..57b6043 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -172,6 +172,14 @@ struct nfp_prog {
struct list_head insns;
};
+/**
+ * struct nfp_bpf_vnic - per-vNIC BPF priv structure
+ * @tc_prog: currently loaded cls_bpf program
+ */
+struct nfp_bpf_vnic {
+ struct bpf_prog *tc_prog;
+};
+
int nfp_bpf_jit(struct nfp_prog *prog);
extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 1a603fd..99b0487 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -568,6 +568,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
return err;
}
nn_writeb(nn, ctrl_offset, entry->entry);
+ nfp_net_irq_unmask(nn, entry->entry);
return 0;
}
@@ -582,6 +583,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
unsigned int vector_idx)
{
nn_writeb(nn, ctrl_offset, 0xff);
+ nn_pci_flush(nn);
free_irq(nn->irq_entries[vector_idx].vector, nn);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 2801ecd..6c02b2d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -333,7 +333,7 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
ls >= ARRAY_SIZE(ls_to_ethtool))
return 0;
- cmd->base.speed = ls_to_ethtool[sts];
+ cmd->base.speed = ls_to_ethtool[ls];
cmd->base.duplex = DUPLEX_FULL;
return 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index c8c4b39..b7abb82 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -358,10 +358,27 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
kfree(p_rdma_info);
}
+static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
+{
+ qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
+}
+
static void qed_rdma_free(struct qed_hwfn *p_hwfn)
{
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
+ qed_rdma_free_reserved_lkey(p_hwfn);
qed_rdma_resc_free(p_hwfn);
}
@@ -615,9 +632,6 @@ static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
- /* The first DPI is reserved for the Kernel */
- __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
-
/* Tid 0 will be used as the key for "reserved MR".
* The driver should allocate memory for it so it can be loaded but no
* ramrod should be passed on it.
@@ -797,17 +811,6 @@ static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
return p_hwfn->p_rdma_info->dev;
}
-static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
-
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-}
-
static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
struct qed_hwfn *p_hwfn;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index be48d9a..cd9a029 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -97,9 +97,7 @@ static int __qed_spq_block(struct qed_hwfn *p_hwfn,
while (iter_cnt--) {
/* Validate we receive completion update */
- if (READ_ONCE(comp_done->done) == 1) {
- /* Read updated FW return value */
- smp_read_barrier_depends();
+ if (smp_load_acquire(&comp_done->done) == 1) { /* ^^^ */
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return 0;
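The smp_load_acquire() above subsumes the old read barrier: it guarantees that once the flag is observed set, the payload written before it is visible too. For clarity, a sketch of the pairing this relies on; the producer side is assumed here and not part of this patch:

/* Assumed pairing (sketch, local types): the completer must publish the
 * payload with release semantics so the acquire load above observes
 * fw_return_code.
 */
struct spq_done_sketch {
	u8 fw_return_code;
	u32 done;
};

static void complete_sketch(struct spq_done_sketch *c, u8 fw_ret)
{
	c->fw_return_code = fw_ret;		/* write payload first */
	smp_store_release(&c->done, 1);		/* then raise the flag */
}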
@@ -776,6 +774,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
int rc = 0;
struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
bool b_ret_ent = true;
+ bool eblock;
if (!p_hwfn)
return -EINVAL;
@@ -794,6 +793,11 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
if (rc)
goto spq_post_fail;
+ /* Check if entry is in block mode before qed_spq_add_entry,
+ * which might kfree p_ent.
+ */
+ eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);
+
/* Add the request to the pending queue */
rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
if (rc)
@@ -811,7 +815,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
spin_unlock_bh(&p_spq->lock);
- if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
+ if (eblock) {
/* For entries in QED BLOCK mode, the completion code cannot
* perform the necessary cleanup - if it did, we couldn't
* access p_ent here to see whether it's successful or not.
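The eblock fix above is an instance of a general rule: read every field you will need from an object before passing it to a function that may free it. A minimal sketch with hypothetical names:

/* Hypothetical sketch of the pattern; 'entry' and the callees are
 * illustrative, not qed code.
 */
struct entry { int comp_mode; };

static int post_sketch(struct entry *e, int mode_eblock,
		       int (*add)(struct entry *), void (*block_wait)(void))
{
	bool eblock = (e->comp_mode == mode_eblock);	/* read first */
	int rc = add(e);				/* may free e */

	if (rc)
		return rc;
	if (eblock)		/* safe: e is not dereferenced again */
		block_wait();
	return 0;
}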
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index 18461fc..53dbf1e 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -47,6 +47,7 @@
#define MDIO_CLK_25_28 7
#define MDIO_WAIT_TIMES 1000
+#define MDIO_STATUS_DELAY_TIME 1
static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
@@ -65,7 +66,7 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
!(reg & (MDIO_START | MDIO_BUSY)),
- 100, MDIO_WAIT_TIMES * 100))
+ MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
return -EIO;
return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
@@ -88,8 +89,8 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
writel(reg, adpt->base + EMAC_MDIO_CTRL);
if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
- !(reg & (MDIO_START | MDIO_BUSY)), 100,
- MDIO_WAIT_TIMES * 100))
+ !(reg & (MDIO_START | MDIO_BUSY)),
+ MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
return -EIO;
return 0;
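For reference, the readl_poll_timeout() arguments being tuned here: the fourth is the delay between reads in microseconds and the fifth the total timeout in microseconds, so the change polls every 1 us instead of every 100 us within the same 100 ms budget. A hedged usage sketch, assuming the macros above:

/* readl_poll_timeout() comes from <linux/iopoll.h> and returns 0 on
 * success or -ETIMEDOUT.
 */
static int wait_mdio_idle(struct emac_adapter *adpt)
{
	u32 reg;

	return readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
				  !(reg & (MDIO_START | MDIO_BUSY)),
				  MDIO_STATUS_DELAY_TIME, /* 1 us between polls */
				  MDIO_WAIT_TIMES * 100)  /* 100 ms budget */
		? -EIO : 0;
}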
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 70c92b6..38c924bd 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -253,18 +253,18 @@ static int emac_open(struct net_device *netdev)
return ret;
}
- ret = emac_mac_up(adpt);
+ ret = adpt->phy.open(adpt);
if (ret) {
emac_mac_rx_tx_rings_free_all(adpt);
free_irq(irq->irq, irq);
return ret;
}
- ret = adpt->phy.open(adpt);
+ ret = emac_mac_up(adpt);
if (ret) {
- emac_mac_down(adpt);
emac_mac_rx_tx_rings_free_all(adpt);
free_irq(irq->irq, irq);
+ adpt->phy.close(adpt);
return ret;
}
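The reorder above also fixes the unwind path: resources are acquired in one order and released in reverse, undoing only the steps that actually succeeded. The shape of that fix, with hypothetical stubs:

/* Hypothetical stubs: the point is the ordering, not the driver API. */
struct adapter;
int phy_open(struct adapter *a);
void phy_close(struct adapter *a);
int mac_up(struct adapter *a);
void rings_free(struct adapter *a);

static int open_in_order(struct adapter *a)
{
	int ret = phy_open(a);

	if (ret)
		goto err_free_rings;

	ret = mac_up(a);
	if (ret)
		goto err_close_phy;

	return 0;

err_close_phy:
	phy_close(a);		/* undo only what succeeded */
err_free_rings:
	rings_free(a);		/* shared unwind (plus free_irq in the driver) */
	return ret;
}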
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index fc0d5fa..734286e 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2244,19 +2244,14 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
void __iomem *ioaddr = tp->mmio_addr;
dma_addr_t paddr = tp->counters_phys_addr;
u32 cmd;
- bool ret;
RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
+ RTL_R32(CounterAddrHigh);
cmd = (u64)paddr & DMA_BIT_MASK(32);
RTL_W32(CounterAddrLow, cmd);
RTL_W32(CounterAddrLow, cmd | counter_cmd);
- ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
-
- RTL_W32(CounterAddrLow, 0);
- RTL_W32(CounterAddrHigh, 0);
-
- return ret;
+ return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
}
static bool rtl8169_reset_counters(struct net_device *dev)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 2b962d3..009780d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2308,32 +2308,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
struct ravb_private *priv = netdev_priv(ndev);
int ret = 0;
- if (priv->wol_enabled) {
- /* Reduce the usecount of the clock to zero and then
- * restore it to its original value. This is done to force
- * the clock to be re-enabled which is a workaround
- * for renesas-cpg-mssr driver which do not enable clocks
- * when resuming from PSCI suspend/resume.
- *
- * Without this workaround the driver fails to communicate
- * with the hardware if WoL was enabled when the system
- * entered PSCI suspend. This is due to that if WoL is enabled
- * we explicitly keep the clock from being turned off when
- * suspending, but in PSCI sleep power is cut so the clock
- * is disabled anyhow, the clock driver is not aware of this
- * so the clock is not turned back on when resuming.
- *
- * TODO: once the renesas-cpg-mssr suspend/resume is working
- * this clock dance should be removed.
- */
- clk_disable(priv->clk);
- clk_disable(priv->clk);
- clk_enable(priv->clk);
- clk_enable(priv->clk);
-
- /* Set reset mode to rearm the WoL logic */
+ /* If WoL is enabled, set reset mode to rearm the WoL logic */
+ if (priv->wol_enabled)
ravb_write(ndev, CCC_OPC_RESET, CCC);
- }
/* All registers have been reset to default values.
* Restore all registers which were set up at probe time and
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index db72d13..53924a4 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -147,7 +147,7 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWNLCR0] = 0x0090,
[FWALCR0] = 0x0094,
[TXNLCR1] = 0x00a0,
- [TXALCR1] = 0x00a0,
+ [TXALCR1] = 0x00a4,
[RXNLCR1] = 0x00a8,
[RXALCR1] = 0x00ac,
[FWNLCR1] = 0x00b0,
@@ -399,7 +399,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWNLCR0] = 0x0090,
[FWALCR0] = 0x0094,
[TXNLCR1] = 0x00a0,
- [TXALCR1] = 0x00a0,
+ [TXALCR1] = 0x00a4,
[RXNLCR1] = 0x00a8,
[RXALCR1] = 0x00ac,
[FWNLCR1] = 0x00b0,
@@ -1892,6 +1892,16 @@ static int sh_eth_phy_init(struct net_device *ndev)
return PTR_ERR(phydev);
}
+ /* mask with MAC supported features */
+ if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) {
+ int err = phy_set_max_speed(phydev, SPEED_100);
+ if (err) {
+ netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n");
+ phy_disconnect(phydev);
+ return err;
+ }
+ }
+
phy_attached_info(phydev);
return 0;
@@ -2079,8 +2089,8 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
add_reg(CSMR);
if (cd->select_mii)
add_reg(RMII_MII);
- add_reg(ARSTR);
if (cd->tsu) {
+ add_tsu_reg(ARSTR);
add_tsu_reg(TSU_CTRST);
add_tsu_reg(TSU_FWEN0);
add_tsu_reg(TSU_FWEN1);
@@ -3215,18 +3225,37 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* ioremap the TSU registers */
if (mdp->cd->tsu) {
struct resource *rtsu;
+
rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
- if (IS_ERR(mdp->tsu_addr)) {
- ret = PTR_ERR(mdp->tsu_addr);
+ if (!rtsu) {
+ dev_err(&pdev->dev, "no TSU resource\n");
+ ret = -ENODEV;
+ goto out_release;
+ }
+ /* Only the first of the two ports sharing this TSU may request
+ * the TSU region, since a second request would fail with -EBUSY
+ * and break the probe.
+ */
+ if (devno % 2 == 0 &&
+ !devm_request_mem_region(&pdev->dev, rtsu->start,
+ resource_size(rtsu),
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "can't request TSU resource.\n");
+ ret = -EBUSY;
+ goto out_release;
+ }
+ mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
+ resource_size(rtsu));
+ if (!mdp->tsu_addr) {
+ dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
+ ret = -ENOMEM;
goto out_release;
}
mdp->port = devno % 2;
ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
}
- /* initialize first or needed device */
- if (!devno || pd->needs_init) {
+ /* Need to init only the first port of the two sharing a TSU */
+ if (devno % 2 == 0) {
if (mdp->cd->chip_reset)
mdp->cd->chip_reset(ndev);
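The devno % 2 convention above encodes "two ports per TSU, even device number is the first port". A condensed sketch of the resulting probe rule:

/* Sketch (assumes the devno-parity convention above): only the first
 * port requests -- and so owns -- the TSU region; both ports map it.
 */
static void __iomem *map_shared_tsu(struct device *dev,
				    struct resource *rtsu, int devno)
{
	if (devno % 2 == 0 &&
	    !devm_request_mem_region(dev, rtsu->start, resource_size(rtsu),
				     dev_name(dev)))
		return NULL;	/* the -EBUSY case in the real driver */

	return devm_ioremap(dev, rtsu->start, resource_size(rtsu));
}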
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index e1e5ac0..ce2ea2d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -409,7 +409,7 @@ struct stmmac_desc_ops {
/* get timestamp value */
u64(*get_timestamp) (void *desc, u32 ats);
/* get rx timestamp status */
- int (*get_rx_timestamp_status) (void *desc, u32 ats);
+ int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats);
/* Display ring */
void (*display_ring)(void *head, unsigned int size, bool rx);
/* set MSS via context descriptor */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 4b286e2..7e089bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -258,7 +258,8 @@ static int dwmac4_rx_check_timestamp(void *desc)
return ret;
}
-static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
+static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
+ u32 ats)
{
struct dma_desc *p = (struct dma_desc *)desc;
int ret = -EINVAL;
@@ -270,7 +271,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
/* Check if timestamp is OK from context descriptor */
do {
- ret = dwmac4_rx_check_timestamp(desc);
+ ret = dwmac4_rx_check_timestamp(next_desc);
if (ret < 0)
goto exit;
i++;
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 7546b36..2a828a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -400,7 +400,8 @@ static u64 enh_desc_get_timestamp(void *desc, u32 ats)
return ns;
}
-static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
+static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
+ u32 ats)
{
if (ats) {
struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index f817f8f..db4cee5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -265,7 +265,7 @@ static u64 ndesc_get_timestamp(void *desc, u32 ats)
return ns;
}
-static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
+static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
{
struct dma_desc *p = (struct dma_desc *)desc;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 721b616..08c19eb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -34,6 +34,7 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
{
u32 value = readl(ioaddr + PTP_TCR);
unsigned long data;
+ u32 reg_value;
/* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
* formula = (1/ptp_clock) * 1000000000
@@ -50,10 +51,11 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
data &= PTP_SSIR_SSINC_MASK;
+ reg_value = data;
if (gmac4)
- data = data << GMAC4_PTP_SSIR_SSINC_SHIFT;
+ reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT;
- writel(data, ioaddr + PTP_SSIR);
+ writel(reg_value, ioaddr + PTP_SSIR);
return data;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d725053..c0af0bc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -364,9 +364,15 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
bool stmmac_eee_init(struct stmmac_priv *priv)
{
struct net_device *ndev = priv->dev;
+ int interface = priv->plat->interface;
unsigned long flags;
bool ret = false;
+ if ((interface != PHY_INTERFACE_MODE_MII) &&
+ (interface != PHY_INTERFACE_MODE_GMII) &&
+ !phy_interface_mode_is_rgmii(interface))
+ goto out;
+
/* Using PCS we cannot dial with the phy registers at this stage
* so we do not support extra feature like EEE.
*/
@@ -482,7 +488,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
desc = np;
/* Check if timestamp is available */
- if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
+ if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
shhwtstamp = skb_hwtstamps(skb);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index ed58c74..f5a7eb2 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -715,7 +715,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
/* warning!!!! We are retrieving the virtual ptr in the sw_data
* field as a 32bit value. Will not work on 64bit machines
*/
- page = (struct page *)GET_SW_DATA0(desc);
+ page = (struct page *)GET_SW_DATA0(ndesc);
if (likely(dma_buff && buf_len && page)) {
dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index b718a02..64fda2e 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -825,6 +825,13 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(rt))
return PTR_ERR(rt);
+ if (skb_dst(skb)) {
+ int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) -
+ GENEVE_BASE_HLEN - info->options_len - 14;
+
+ skb_dst_update_pmtu(skb, mtu);
+ }
+
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
@@ -864,6 +871,13 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(dst))
return PTR_ERR(dst);
+ if (skb_dst(skb)) {
+ int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) -
+ GENEVE_BASE_HLEN - info->options_len - 14;
+
+ skb_dst_update_pmtu(skb, mtu);
+ }
+
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
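The MTU computed above strips every header the tunnel will add; the literal 14 is the inner Ethernet header. A worked example with assumed values (taking GENEVE_BASE_HLEN as UDP 8 + Geneve 8 = 16 bytes):

#include <stdio.h>

int main(void)
{
	int dst_mtu = 1500;	/* assumed outer path MTU */
	int iphdr = 20;		/* sizeof(struct iphdr) */
	int geneve_base = 16;	/* GENEVE_BASE_HLEN: UDP (8) + Geneve (8) */
	int options = 0;	/* info->options_len */
	int eth = 14;		/* inner Ethernet header */

	/* prints "inner MTU = 1450" */
	printf("inner MTU = %d\n",
	       dst_mtu - iphdr - geneve_base - options - eth);
	return 0;
}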
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 8483f03..1ab97d9 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1379,8 +1379,8 @@ static int rr_close(struct net_device *dev)
rrpriv->info_dma);
rrpriv->info = NULL;
- free_irq(pdev->irq, dev);
spin_unlock_irqrestore(&rrpriv->lock, flags);
+ free_irq(pdev->irq, dev);
return 0;
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a178c5e..a0f2be8 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1444,9 +1444,14 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
return 0;
unregister_netdev:
+ /* macvlan_uninit would free the macvlan port */
unregister_netdevice(dev);
+ return err;
destroy_macvlan_port:
- if (create)
+ /* The macvlan port may be freed by macvlan_uninit() when registration
+ * fails, so only destroy the port while it is still valid.
+ */
+ if (create && macvlan_port_get_rtnl(dev))
macvlan_port_destroy(port->dev);
return err;
}
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 5f93e6a..e911e49 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -239,14 +239,10 @@ static int at803x_resume(struct phy_device *phydev)
{
int value;
- mutex_lock(&phydev->lock);
-
value = phy_read(phydev, MII_BMCR);
value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
phy_write(phydev, MII_BMCR, value);
- mutex_unlock(&phydev->lock);
-
return 0;
}
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 4d02b27..82104ed 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -637,6 +637,10 @@ static int m88e1510_config_aneg(struct phy_device *phydev)
if (err < 0)
goto error;
+ /* Do not touch the fiber page if we're in copper->sgmii mode */
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
+ return 0;
+
/* Then the fiber link */
err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
@@ -875,6 +879,8 @@ static int m88e1510_config_init(struct phy_device *phydev)
/* SGMII-to-Copper mode initialization */
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ u32 pause;
+
/* Select page 18 */
err = marvell_set_page(phydev, 18);
if (err < 0)
@@ -898,6 +904,16 @@ static int m88e1510_config_init(struct phy_device *phydev)
err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
+
+ /* There appears to be a bug in the 88e1512 when used in
+ * SGMII to copper mode, where the AN advertisement register
+ * clears the pause bits each time a negotiation occurs.
+ * This means we can never be truly sure what was advertised,
+ * so disable Pause support.
+ */
+ pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ phydev->supported &= ~pause;
+ phydev->advertising &= ~pause;
}
return m88e1121_config_init(phydev);
@@ -2069,7 +2085,7 @@ static struct phy_driver marvell_drivers[] = {
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1145_config_init,
- .config_aneg = &marvell_config_aneg,
+ .config_aneg = &m88e1101_config_aneg,
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 1352965..6425ce0 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -118,8 +118,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
data->regulator = devm_regulator_get(&pdev->dev, "phy");
if (IS_ERR(data->regulator)) {
- if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(data->regulator) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_out_free_mdiobus;
+ }
dev_info(&pdev->dev, "no regulator found\n");
data->regulator = NULL;
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index bfd3090..07c6048 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -194,8 +194,11 @@ static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata)
}
ret = xgene_enet_ecc_init(pdata);
- if (ret)
+ if (ret) {
+ if (pdata->dev->of_node)
+ clk_disable_unprepare(pdata->clk);
return ret;
+ }
xgene_gmac_reset(pdata);
return 0;
@@ -388,8 +391,10 @@ static int xgene_mdio_probe(struct platform_device *pdev)
return ret;
mdio_bus = mdiobus_alloc();
- if (!mdio_bus)
- return -ENOMEM;
+ if (!mdio_bus) {
+ ret = -ENOMEM;
+ goto out_clk;
+ }
mdio_bus->name = "APM X-Gene MDIO bus";
@@ -418,7 +423,7 @@ static int xgene_mdio_probe(struct platform_device *pdev)
mdio_bus->phy_mask = ~0;
ret = mdiobus_register(mdio_bus);
if (ret)
- goto out;
+ goto out_mdiobus;
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1,
acpi_register_phy, NULL, mdio_bus, NULL);
@@ -426,16 +431,20 @@ static int xgene_mdio_probe(struct platform_device *pdev)
}
if (ret)
- goto out;
+ goto out_mdiobus;
pdata->mdio_bus = mdio_bus;
xgene_mdio_status = true;
return 0;
-out:
+out_mdiobus:
mdiobus_free(mdio_bus);
+out_clk:
+ if (dev->of_node)
+ clk_disable_unprepare(pdata->clk);
+
return ret;
}
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2df7b62c..54d00a1 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -270,6 +270,7 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
if (addr == mdiodev->addr) {
dev->of_node = child;
+ dev->fwnode = of_fwnode_handle(child);
return;
}
}
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 1ea69b7..842eb87 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -22,6 +22,7 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
+#include <linux/bitfield.h>
static int meson_gxl_config_init(struct phy_device *phydev)
{
@@ -50,6 +51,77 @@ static int meson_gxl_config_init(struct phy_device *phydev)
return 0;
}
+/* This function is provided to cope with the possible failures of this PHY
+ * during the aneg process. When aneg fails, the PHY reports that aneg is done
+ * but the value found in MII_LPA is wrong:
+ * - Early failures: MII_LPA is just 0x0001. If MII_EXPANSION reports that
+ * the link partner (LP) supports aneg but the LP never acked our base
+ * code word, it is likely that we never sent it to begin with.
+ * - Late failures: MII_LPA is filled with a value which seems to make sense
+ * but it actually is not what the LP is advertising. It seems that we
+ * can detect this using a magic bit in the WOL bank (reg 12 - bit 12).
+ * If this particular bit is not set when aneg is reported as done,
+ * it means MII_LPA is likely to be wrong.
+ *
+ * In both cases, forcing a restart of the aneg process solves the problem.
+ * When this failure happens, the first retry is usually successful but,
+ * in some cases, it may take up to 6 retries to get a decent result.
+ */
+static int meson_gxl_read_status(struct phy_device *phydev)
+{
+ int ret, wol, lpa, exp;
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ ret = genphy_aneg_done(phydev);
+ if (ret < 0)
+ return ret;
+ else if (!ret)
+ goto read_status_continue;
+
+ /* Need to access WOL bank, make sure the access is open */
+ ret = phy_write(phydev, 0x14, 0x0000);
+ if (ret)
+ return ret;
+ ret = phy_write(phydev, 0x14, 0x0400);
+ if (ret)
+ return ret;
+ ret = phy_write(phydev, 0x14, 0x0000);
+ if (ret)
+ return ret;
+ ret = phy_write(phydev, 0x14, 0x0400);
+ if (ret)
+ return ret;
+
+ /* Request LPI_STATUS WOL register */
+ ret = phy_write(phydev, 0x14, 0x8D80);
+ if (ret)
+ return ret;
+
+ /* Read LPI_STATUS value */
+ wol = phy_read(phydev, 0x15);
+ if (wol < 0)
+ return wol;
+
+ lpa = phy_read(phydev, MII_LPA);
+ if (lpa < 0)
+ return lpa;
+
+ exp = phy_read(phydev, MII_EXPANSION);
+ if (exp < 0)
+ return exp;
+
+ if (!(wol & BIT(12)) ||
+ ((exp & EXPANSION_NWAY) && !(lpa & LPA_LPACK))) {
+ /* Looks like aneg failed after all */
+ phydev_dbg(phydev, "LPA corruption - aneg restart\n");
+ return genphy_restart_aneg(phydev);
+ }
+ }
+
+read_status_continue:
+ return genphy_read_status(phydev);
+}
+
static struct phy_driver meson_gxl_phy[] = {
{
.phy_id = 0x01814400,
@@ -60,7 +132,7 @@ static struct phy_driver meson_gxl_phy[] = {
.config_init = meson_gxl_config_init,
.config_aneg = genphy_config_aneg,
.aneg_done = genphy_aneg_done,
- .read_status = genphy_read_status,
+ .read_status = meson_gxl_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
},
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index ab46141..422ff63 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -624,6 +624,7 @@ static int ksz9031_read_status(struct phy_device *phydev)
phydev->link = 0;
if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
phydev->drv->config_intr(phydev);
+ return genphy_config_aneg(phydev);
}
return 0;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 2b1e67b..ed10d1f 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -828,7 +828,6 @@ EXPORT_SYMBOL(phy_stop);
*/
void phy_start(struct phy_device *phydev)
{
- bool do_resume = false;
int err = 0;
mutex_lock(&phydev->lock);
@@ -841,6 +840,9 @@ void phy_start(struct phy_device *phydev)
phydev->state = PHY_UP;
break;
case PHY_HALTED:
+ /* if phy was suspended, bring the physical link up again */
+ phy_resume(phydev);
+
/* make sure interrupts are re-enabled for the PHY */
if (phydev->irq != PHY_POLL) {
err = phy_enable_interrupts(phydev);
@@ -849,17 +851,12 @@ void phy_start(struct phy_device *phydev)
}
phydev->state = PHY_RESUMING;
- do_resume = true;
break;
default:
break;
}
mutex_unlock(&phydev->lock);
- /* if phy was suspended, bring the physical link up again */
- if (do_resume)
- phy_resume(phydev);
-
phy_trigger_machine(phydev, true);
}
EXPORT_SYMBOL(phy_start);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 67f25ac..b15b31c 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -135,7 +135,9 @@ static int mdio_bus_phy_resume(struct device *dev)
if (!mdio_bus_phy_may_suspend(phydev))
goto no_resume;
+ mutex_lock(&phydev->lock);
ret = phy_resume(phydev);
+ mutex_unlock(&phydev->lock);
if (ret < 0)
return ret;
@@ -1026,7 +1028,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
if (err)
goto error;
+ mutex_lock(&phydev->lock);
phy_resume(phydev);
+ mutex_unlock(&phydev->lock);
phy_led_triggers_register(phydev);
return err;
@@ -1157,6 +1161,8 @@ int phy_resume(struct phy_device *phydev)
struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
int ret = 0;
+ WARN_ON(!mutex_is_locked(&phydev->lock));
+
if (phydev->drv && phydrv->resume)
ret = phydrv->resume(phydev);
@@ -1639,13 +1645,9 @@ int genphy_resume(struct phy_device *phydev)
{
int value;
- mutex_lock(&phydev->lock);
-
value = phy_read(phydev, MII_BMCR);
phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
- mutex_unlock(&phydev->lock);
-
return 0;
}
EXPORT_SYMBOL(genphy_resume);
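Taken together, these hunks move phydev->lock out of the resume implementations and into the callers of phy_resume(), with the WARN_ON documenting the new contract. A sketch of what a driver resume callback looks like under that contract (hypothetical driver, mirroring genphy_resume above):

/* Sketch: runs with phydev->lock already held by the phy_resume()
 * caller, so it must not take the lock itself.
 */
static int sketch_phy_resume(struct phy_device *phydev)
{
	int val = phy_read(phydev, MII_BMCR);

	if (val < 0)
		return val;
	return phy_write(phydev, MII_BMCR, val & ~BMCR_PDOWN);
}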
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 5dc9668..249ce5c 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -526,6 +526,7 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
pl->link_config.pause = MLO_PAUSE_AN;
pl->link_config.speed = SPEED_UNKNOWN;
pl->link_config.duplex = DUPLEX_UNKNOWN;
+ pl->link_config.an_enabled = true;
pl->ops = ops;
__set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
@@ -951,6 +952,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
mutex_lock(&pl->state_mutex);
/* Configure the MAC to match the new settings */
linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising);
+ pl->link_config.interface = config.interface;
pl->link_config.speed = our_kset.base.speed;
pl->link_config.duplex = our_kset.base.duplex;
pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE;
@@ -1294,6 +1296,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
mii->phy_id = pl->phydev->mdio.addr;
+ /* fall through */
case SIOCGMIIREG:
ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num);
@@ -1316,6 +1319,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
mii->phy_id = 0;
+ /* fall through */
case SIOCGMIIREG:
ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num);
@@ -1427,9 +1431,8 @@ static void phylink_sfp_link_down(void *upstream)
WARN_ON(!lockdep_rtnl_is_held());
set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
+ queue_work(system_power_efficient_wq, &pl->resolve);
flush_work(&pl->resolve);
-
- netif_carrier_off(pl->netdev);
}
static void phylink_sfp_link_up(void *upstream)
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 8a1b1f4..ab64a14 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -356,7 +356,8 @@ EXPORT_SYMBOL_GPL(sfp_register_upstream);
void sfp_unregister_upstream(struct sfp_bus *bus)
{
rtnl_lock();
- sfp_unregister_bus(bus);
+ if (bus->sfp)
+ sfp_unregister_bus(bus);
bus->upstream = NULL;
bus->netdev = NULL;
rtnl_unlock();
@@ -459,7 +460,8 @@ EXPORT_SYMBOL_GPL(sfp_register_socket);
void sfp_unregister_socket(struct sfp_bus *bus)
{
rtnl_lock();
- sfp_unregister_bus(bus);
+ if (bus->netdev)
+ sfp_unregister_bus(bus);
bus->sfp_dev = NULL;
bus->sfp = NULL;
bus->socket_ops = NULL;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index d8e5747..264d4af 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1006,17 +1006,18 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
if (!ifname_is_set)
snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
+ mutex_unlock(&pn->all_ppp_mutex);
+
ret = register_netdevice(ppp->dev);
if (ret < 0)
goto err_unit;
atomic_inc(&ppp_unit_count);
- mutex_unlock(&pn->all_ppp_mutex);
-
return 0;
err_unit:
+ mutex_lock(&pn->all_ppp_mutex);
unit_put(&pn->units_idr, ppp->file.index);
err:
mutex_unlock(&pn->all_ppp_mutex);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 4e1da16..5aa59f4 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -842,6 +842,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
struct pppoe_hdr *ph;
struct net_device *dev;
char *start;
+ int hlen;
lock_sock(sk);
if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
@@ -860,16 +861,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
if (total_len > (dev->mtu + dev->hard_header_len))
goto end;
-
- skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
- 0, GFP_KERNEL);
+ hlen = LL_RESERVED_SPACE(dev);
+ skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
+ dev->needed_tailroom, 0, GFP_KERNEL);
if (!skb) {
error = -ENOMEM;
goto end;
}
/* Reserve space for headers. */
- skb_reserve(skb, dev->hard_header_len);
+ skb_reserve(skb, hlen);
skb_reset_network_header(skb);
skb->dev = dev;
@@ -930,7 +931,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
/* Copy the data if there is no space for the header or if it's
* read-only.
*/
- if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len))
+ if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
goto abort;
__skb_push(skb, sizeof(*ph));
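LL_RESERVED_SPACE(dev) covers hard_header_len plus needed_headroom rounded up to the 16-byte HH_DATA_MOD boundary, which is why it can exceed hard_header_len alone and why the old fixed "+ 32" slack was both wasteful and potentially insufficient. A small worked example mirroring the macro (values assumed):

#include <stdio.h>

#define HH_DATA_MOD 16
/* mirrors LL_RESERVED_SPACE() from <linux/netdevice.h> */
#define LL_RESERVED(hard_hdr, needed) \
	((((hard_hdr) + (needed)) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

int main(void)
{
	/* plain Ethernet: 14-byte header, no extra headroom -> 16 */
	printf("%d\n", LL_RESERVED(14, 0));
	/* a device asking for 32 bytes of extra headroom -> 48 */
	printf("%d\n", LL_RESERVED(14, 32));
	return 0;
}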
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4f4a842..a8ec589 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -611,6 +611,14 @@ static void tun_queue_purge(struct tun_file *tfile)
skb_queue_purge(&tfile->sk.sk_error_queue);
}
+static void tun_cleanup_tx_array(struct tun_file *tfile)
+{
+ if (tfile->tx_array.ring.queue) {
+ skb_array_cleanup(&tfile->tx_array);
+ memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
+ }
+}
+
static void __tun_detach(struct tun_file *tfile, bool clean)
{
struct tun_file *ntfile;
@@ -657,8 +665,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun->dev->reg_state == NETREG_REGISTERED)
unregister_netdevice(tun->dev);
}
- if (tun)
- skb_array_cleanup(&tfile->tx_array);
+ tun_cleanup_tx_array(tfile);
sock_put(&tfile->sk);
}
}
@@ -700,11 +707,13 @@ static void tun_detach_all(struct net_device *dev)
/* Drop read queue */
tun_queue_purge(tfile);
sock_put(&tfile->sk);
+ tun_cleanup_tx_array(tfile);
}
list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
tun_enable_queue(tfile);
tun_queue_purge(tfile);
sock_put(&tfile->sk);
+ tun_cleanup_tx_array(tfile);
}
BUG_ON(tun->numdisabled != 0);
@@ -2851,6 +2860,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
+ memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
+
return 0;
}
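tun_cleanup_tx_array() is safe to call from every teardown path because it checks its handle and then clears it. The generic form of that idempotent-cleanup pattern, as a hypothetical userspace sketch:

#include <stdlib.h>

struct res { void *buf; };

static void res_cleanup(struct res *r)
{
	if (!r->buf)
		return;		/* already released: harmless no-op */
	free(r->buf);
	r->buf = NULL;		/* re-arm the guard for later callers */
}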
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 94c7804..ec56ff2 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2396,6 +2396,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
dev->rx_qlen = 4;
+ dev->tx_qlen = 4;
}
ret = lan78xx_write_reg(dev, BURST_CAP, buf);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 304ec65..728819f 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1100,6 +1100,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+ {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
@@ -1204,12 +1205,14 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d51d9ab..0657203 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -606,6 +606,7 @@ enum rtl8152_flags {
PHY_RESET,
SCHEDULE_NAPI,
GREEN_ETHERNET,
+ DELL_TB_RX_AGG_BUG,
};
/* Define these values to match your device */
@@ -1798,6 +1799,9 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
dev_kfree_skb_any(skb);
remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
+
+ if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
+ break;
}
if (!skb_queue_empty(&skb_head)) {
@@ -4133,6 +4137,9 @@ static void r8153_init(struct r8152 *tp)
/* rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
+ if (test_bit(DELL_TB_RX_AGG_BUG, &tp->flags))
+ ocp_data |= RX_AGG_DISABLE;
+
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
rtl_tally_reset(tp);
@@ -5207,6 +5214,12 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->hw_features &= ~NETIF_F_RXCSUM;
}
+ if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 &&
+ udev->serial && !strcmp(udev->serial, "000001000000")) {
+ dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation\n");
+ set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
+ }
+
netdev->ethtool_ops = &ops;
netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index d56fe32..8a22ff6 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -457,12 +457,10 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
set_bit (work, &dev->flags);
- if (!schedule_work (&dev->kevent)) {
- if (net_ratelimit())
- netdev_err(dev->net, "kevent %d may have been dropped\n", work);
- } else {
+ if (!schedule_work (&dev->kevent))
+ netdev_dbg(dev->net, "kevent %d may have been dropped\n", work);
+ else
netdev_dbg(dev->net, "kevent %d scheduled\n", work);
- }
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d1c7029..cf95290 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1616,7 +1616,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
rq->rx_ring[i].basePA);
rq->rx_ring[i].base = NULL;
}
- rq->buf_info[i] = NULL;
}
if (rq->data_ring.base) {
@@ -1638,6 +1637,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
(rq->rx_ring[0].size + rq->rx_ring[1].size);
dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
rq->buf_info_pa);
+ rq->buf_info[0] = rq->buf_info[1] = NULL;
}
}
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index feb1b2e..139c61c 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -673,8 +673,9 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
struct sock *sk,
struct sk_buff *skb)
{
- /* don't divert multicast */
- if (ipv4_is_multicast(ip_hdr(skb)->daddr))
+ /* don't divert multicast or local broadcast */
+ if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
+ ipv4_is_lbcast(ip_hdr(skb)->daddr))
return skb;
if (qdisc_tx_is_default(vrf_dev))
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 19b9cc5..c3e34e3 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2155,6 +2155,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
ndst = &rt->dst;
+ if (skb_dst(skb)) {
+ int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
+
+ skb_dst_update_pmtu(skb, mtu);
+ }
+
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
@@ -2190,6 +2196,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto out_unlock;
}
+ if (skb_dst(skb)) {
+ int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
+
+ skb_dst_update_pmtu(skb, mtu);
+ }
+
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
skb_scrub_packet(skb, xnet);
@@ -3103,6 +3115,11 @@ static void vxlan_config_apply(struct net_device *dev,
max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
VXLAN_HEADROOM);
+ if (max_mtu < ETH_MIN_MTU)
+ max_mtu = ETH_MIN_MTU;
+
+ if (!changelink && !conf->mtu)
+ dev->mtu = max_mtu;
}
if (dev->mtu > max_mtu)
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index f7d228b..987f125 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -384,6 +384,18 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
}
}
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ vif = wcn36xx_priv_to_vif(tmp);
+ if (hw->conf.flags & IEEE80211_CONF_PS) {
+ if (vif->bss_conf.ps) /* PS allowed? */
+ wcn36xx_pmc_enter_bmps_state(wcn, vif);
+ } else {
+ wcn36xx_pmc_exit_bmps_state(wcn, vif);
+ }
+ }
+ }
+
mutex_unlock(&wcn->conf_mutex);
return 0;
@@ -747,17 +759,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
vif_priv->dtim_period = bss_conf->dtim_period;
}
- if (changed & BSS_CHANGED_PS) {
- wcn36xx_dbg(WCN36XX_DBG_MAC,
- "mac bss PS set %d\n",
- bss_conf->ps);
- if (bss_conf->ps) {
- wcn36xx_pmc_enter_bmps_state(wcn, vif);
- } else {
- wcn36xx_pmc_exit_bmps_state(wcn, vif);
- }
- }
-
if (changed & BSS_CHANGED_BSSID) {
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
bss_conf->bssid);
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
index 589fe5f..1976b80 100644
--- a/drivers/net/wireless/ath/wcn36xx/pmc.c
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -45,8 +45,10 @@ int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
if (WCN36XX_BMPS != vif_priv->pw_state) {
- wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n");
- return -EINVAL;
+ /* Unbalanced call or last BMPS enter failed */
+ wcn36xx_dbg(WCN36XX_DBG_PMC,
+ "Not in BMPS mode, no need to exit\n");
+ return -EALREADY;
}
wcn36xx_smd_exit_bmps(wcn, vif);
vif_priv->pw_state = WCN36XX_FULL_POWER;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 6a59d06..9be0b05 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -182,12 +182,9 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
err = request_firmware(&clm, clm_name, dev);
if (err) {
- if (err == -ENOENT) {
- brcmf_dbg(INFO, "continue with CLM data currently present in firmware\n");
- return 0;
- }
- brcmf_err("request CLM blob file failed (%d)\n", err);
- return err;
+ brcmf_info("no clm_blob available(err=%d), device may have limited channels available\n",
+ err);
+ return 0;
}
chunk_buf = kzalloc(sizeof(*chunk_buf) + MAX_CHUNK_LEN - 1, GFP_KERNEL);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index d749abe..403e65c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -670,11 +670,15 @@ static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
return index & (q->n_window - 1);
}
-static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
+static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
struct iwl_txq *txq, int idx)
{
- return txq->tfds + trans_pcie->tfd_size * iwl_pcie_get_cmd_index(txq,
- idx);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (trans->cfg->use_tfh)
+ idx = iwl_pcie_get_cmd_index(txq, idx);
+
+ return txq->tfds + trans_pcie->tfd_size * idx;
}
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 16b345f..6d0a907 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -171,8 +171,6 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
* idx is bounded by n_window
*/
@@ -181,7 +179,7 @@ static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
lockdep_assert_held(&txq->lock);
iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
- iwl_pcie_get_tfd(trans_pcie, txq, idx));
+ iwl_pcie_get_tfd(trans, txq, idx));
/* free SKB */
if (txq->entries) {
@@ -364,11 +362,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd =
- iwl_pcie_get_tfd(trans_pcie, txq, idx);
+ struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
dma_addr_t tb_phys;
bool amsdu;
int i, len, tb1_len, tb2_len, hdr_len;
@@ -565,8 +561,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
u8 group_id = iwl_cmd_groupid(cmd->id);
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
- struct iwl_tfh_tfd *tfd =
- iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
memset(tfd, 0, sizeof(*tfd));
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index fed6d84..3f85713 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -373,7 +373,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i, num_tbs;
- void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index);
+ void *tfd = iwl_pcie_get_tfd(trans, txq, index);
/* Sanity check on number of chunks */
num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
@@ -2018,7 +2018,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
}
trace_iwlwifi_dev_tx(trans->dev, skb,
- iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+ iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
hdr_len);
@@ -2092,7 +2092,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
IEEE80211_CCMP_HDR_LEN : 0;
trace_iwlwifi_dev_tx(trans->dev, skb,
- iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+ iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
@@ -2425,7 +2425,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
IWL_FIRST_TB_SIZE);
- tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+ tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */
iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
iwl_pcie_tfd_get_num_tbs(trans, tfd));
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 10b075a..f6d4a50f 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -489,6 +489,7 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
static spinlock_t hwsim_radio_lock;
static LIST_HEAD(hwsim_radios);
+static struct workqueue_struct *hwsim_wq;
static int hwsim_radio_idx;
static struct platform_driver mac80211_hwsim_driver = {
@@ -684,6 +685,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN);
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_NULLFUNC |
+ IEEE80211_FCTL_TODS |
(ps ? IEEE80211_FCTL_PM : 0));
hdr->duration_id = cpu_to_le16(0);
memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
@@ -3119,6 +3121,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (info->attrs[HWSIM_ATTR_CHANNELS])
param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
+ if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
+ GENL_SET_ERR_MSG(info, "too many channels specified");
+ return -EINVAL;
+ }
+
if (info->attrs[HWSIM_ATTR_NO_VIF])
param.no_vif = true;
@@ -3215,7 +3222,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info)))
continue;
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb) {
res = -ENOMEM;
goto out_err;
@@ -3341,7 +3348,7 @@ static void remove_user_radios(u32 portid)
if (entry->destroy_on_close && entry->portid == portid) {
list_del(&entry->list);
INIT_WORK(&entry->destroy_work, destroy_radio);
- schedule_work(&entry->destroy_work);
+ queue_work(hwsim_wq, &entry->destroy_work);
}
}
spin_unlock_bh(&hwsim_radio_lock);
@@ -3416,7 +3423,7 @@ static void __net_exit hwsim_exit_net(struct net *net)
list_del(&data->list);
INIT_WORK(&data->destroy_work, destroy_radio);
- schedule_work(&data->destroy_work);
+ queue_work(hwsim_wq, &data->destroy_work);
}
spin_unlock_bh(&hwsim_radio_lock);
}
@@ -3448,6 +3455,10 @@ static int __init init_mac80211_hwsim(void)
spin_lock_init(&hwsim_radio_lock);
+ hwsim_wq = alloc_workqueue("hwsim_wq", WQ_MEM_RECLAIM, 0);
+ if (!hwsim_wq)
+ return -ENOMEM;
+
err = register_pernet_device(&hwsim_net_ops);
if (err)
return err;
@@ -3586,8 +3597,11 @@ static void __exit exit_mac80211_hwsim(void)
hwsim_exit_netlink();
mac80211_hwsim_free();
+ flush_workqueue(hwsim_wq);
+
unregister_netdev(hwsim_mon);
platform_driver_unregister(&mac80211_hwsim_driver);
unregister_pernet_device(&hwsim_net_ops);
+ destroy_workqueue(hwsim_wq);
}
module_exit(exit_mac80211_hwsim);
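A condensed sketch of the workqueue lifecycle these hunks introduce (module skeleton only; the real init path also unwinds on later failures):

/* Sketch: create at init, flush before tearing down what the queued
 * work touches, destroy at exit. WQ_MEM_RECLAIM matches the usage above.
 */
static struct workqueue_struct *sketch_wq;

static int __init sketch_init(void)
{
	sketch_wq = alloc_workqueue("sketch_wq", WQ_MEM_RECLAIM, 0);
	return sketch_wq ? 0 : -ENOMEM;
}

static void __exit sketch_exit(void)
{
	flush_workqueue(sketch_wq);	/* let destroy_radio work finish */
	destroy_workqueue(sketch_wq);
}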
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index c5a34671..9bd7dde 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1326,6 +1326,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netif_carrier_off(netdev);
+ xenbus_switch_state(dev, XenbusStateInitialising);
return netdev;
exit:
diff --git a/drivers/nubus/Makefile b/drivers/nubus/Makefile
index 21bda20..6d063cd 100644
--- a/drivers/nubus/Makefile
+++ b/drivers/nubus/Makefile
@@ -2,6 +2,6 @@
# Makefile for the nubus specific drivers.
#
-obj-y := nubus.o
+obj-y := nubus.o bus.o
obj-$(CONFIG_PROC_FS) += proc.o
diff --git a/drivers/nubus/bus.c b/drivers/nubus/bus.c
new file mode 100644
index 0000000..d306c34
--- /dev/null
+++ b/drivers/nubus/bus.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Bus implementation for the NuBus subsystem.
+//
+// Copyright (C) 2017 Finn Thain
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/nubus.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#define to_nubus_board(d) container_of(d, struct nubus_board, dev)
+#define to_nubus_driver(d) container_of(d, struct nubus_driver, driver)
+
+static int nubus_bus_match(struct device *dev, struct device_driver *driver)
+{
+ return 1;
+}
+
+static int nubus_device_probe(struct device *dev)
+{
+ struct nubus_driver *ndrv = to_nubus_driver(dev->driver);
+ int err = -ENODEV;
+
+ if (ndrv->probe)
+ err = ndrv->probe(to_nubus_board(dev));
+ return err;
+}
+
+static int nubus_device_remove(struct device *dev)
+{
+ struct nubus_driver *ndrv = to_nubus_driver(dev->driver);
+ int err = -ENODEV;
+
+ if (dev->driver && ndrv->remove)
+ err = ndrv->remove(to_nubus_board(dev));
+ return err;
+}
+
+struct bus_type nubus_bus_type = {
+ .name = "nubus",
+ .match = nubus_bus_match,
+ .probe = nubus_device_probe,
+ .remove = nubus_device_remove,
+};
+EXPORT_SYMBOL(nubus_bus_type);
+
+int nubus_driver_register(struct nubus_driver *ndrv)
+{
+ ndrv->driver.bus = &nubus_bus_type;
+ return driver_register(&ndrv->driver);
+}
+EXPORT_SYMBOL(nubus_driver_register);
+
+void nubus_driver_unregister(struct nubus_driver *ndrv)
+{
+ driver_unregister(&ndrv->driver);
+}
+EXPORT_SYMBOL(nubus_driver_unregister);
+
+static struct device nubus_parent = {
+ .init_name = "nubus",
+};
+
+int __init nubus_bus_register(void)
+{
+ int err;
+
+ err = device_register(&nubus_parent);
+ if (err)
+ return err;
+
+ err = bus_register(&nubus_bus_type);
+ if (!err)
+ return 0;
+
+ device_unregister(&nubus_parent);
+ return err;
+}
+
+static void nubus_device_release(struct device *dev)
+{
+ struct nubus_board *board = to_nubus_board(dev);
+ struct nubus_rsrc *fres, *tmp;
+
+ list_for_each_entry_safe(fres, tmp, &nubus_func_rsrcs, list)
+ if (fres->board == board) {
+ list_del(&fres->list);
+ kfree(fres);
+ }
+ kfree(board);
+}
+
+int nubus_device_register(struct nubus_board *board)
+{
+ board->dev.parent = &nubus_parent;
+ board->dev.release = nubus_device_release;
+ board->dev.bus = &nubus_bus_type;
+ dev_set_name(&board->dev, "slot.%X", board->slot);
+ return device_register(&board->dev);
+}
+
+static int nubus_print_device_name_fn(struct device *dev, void *data)
+{
+ struct nubus_board *board = to_nubus_board(dev);
+ struct seq_file *m = data;
+
+ seq_printf(m, "Slot %X: %s\n", board->slot, board->name);
+ return 0;
+}
+
+int nubus_proc_show(struct seq_file *m, void *data)
+{
+ return bus_for_each_dev(&nubus_bus_type, NULL, m,
+ nubus_print_device_name_fn);
+}
diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c
index b793727..4621ff9 100644
--- a/drivers/nubus/nubus.c
+++ b/drivers/nubus/nubus.c
@@ -15,6 +15,7 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -31,8 +32,7 @@
/* Globals */
-struct nubus_dev *nubus_devices;
-struct nubus_board *nubus_boards;
+LIST_HEAD(nubus_func_rsrcs);
/* Meaning of "bytelanes":
@@ -146,7 +146,7 @@ static inline void *nubus_rom_addr(int slot)
return (void *)(0xF1000000 + (slot << 24));
}
-static unsigned char *nubus_dirptr(const struct nubus_dirent *nd)
+unsigned char *nubus_dirptr(const struct nubus_dirent *nd)
{
unsigned char *p = nd->base;
@@ -161,7 +161,7 @@ static unsigned char *nubus_dirptr(const struct nubus_dirent *nd)
pointed to with offsets) out of the card ROM. */
void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent,
- int len)
+ unsigned int len)
{
unsigned char *t = (unsigned char *)dest;
unsigned char *p = nubus_dirptr(dirent);
@@ -173,21 +173,49 @@ void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent,
}
EXPORT_SYMBOL(nubus_get_rsrc_mem);
-void nubus_get_rsrc_str(void *dest, const struct nubus_dirent *dirent,
- int len)
+unsigned int nubus_get_rsrc_str(char *dest, const struct nubus_dirent *dirent,
+ unsigned int len)
{
- unsigned char *t = (unsigned char *)dest;
+ char *t = dest;
unsigned char *p = nubus_dirptr(dirent);
- while (len) {
- *t = nubus_get_rom(&p, 1, dirent->mask);
- if (!*t++)
+ while (len > 1) {
+ unsigned char c = nubus_get_rom(&p, 1, dirent->mask);
+
+ if (!c)
break;
+ *t++ = c;
len--;
}
+ if (len > 0)
+ *t = '\0';
+ return t - dest;
}
EXPORT_SYMBOL(nubus_get_rsrc_str);
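
[Editor's note] The rewritten nubus_get_rsrc_str() now behaves like strscpy(): at most len - 1 bytes are copied, the destination is always NUL-terminated when len > 0, and the return value is the number of bytes actually stored. A standalone sketch of the same discipline, with a plain byte pointer standing in for the nubus_get_rom() ROM accessor:

/* Userspace model of the bounded copy above. */
#include <stdio.h>

static unsigned int bounded_copy(char *dest, const unsigned char *src,
				 unsigned int len)
{
	char *t = dest;

	while (len > 1) {
		unsigned char c = *src++;

		if (!c)
			break;
		*t++ = c;
		len--;
	}
	if (len > 0)
		*t = '\0';
	return t - dest;
}

int main(void)
{
	char buf[8];
	unsigned int n = bounded_copy(buf, (const unsigned char *)
				      "Macintosh Display Card", sizeof(buf));

	printf("%u: \"%s\"\n", n, buf);	/* prints 7: "Macinto" */
	return 0;
}
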
+void nubus_seq_write_rsrc_mem(struct seq_file *m,
+ const struct nubus_dirent *dirent,
+ unsigned int len)
+{
+ unsigned long buf[32];
+ unsigned int buf_size = sizeof(buf);
+ unsigned char *p = nubus_dirptr(dirent);
+
+ /* If possible, write out full buffers */
+ while (len >= buf_size) {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(buf); i++)
+ buf[i] = nubus_get_rom(&p, sizeof(buf[0]),
+ dirent->mask);
+ seq_write(m, buf, buf_size);
+ len -= buf_size;
+ }
+ /* If not, write out individual bytes */
+ while (len--)
+ seq_putc(m, nubus_get_rom(&p, 1, dirent->mask));
+}
+
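
[Editor's note] For a resource of len bytes the helper above issues len / 128 full-buffer seq_write() calls followed by len % 128 seq_putc() calls (buf is 32 unsigned longs, i.e. 128 bytes on 32-bit m68k, the only platform with NuBus). A trivial check of that split for a 300-byte resource:

#include <stdio.h>

int main(void)
{
	/* buf[] above is 32 unsigned longs: 128 bytes on 32-bit m68k. */
	unsigned int len = 300, buf_size = 128;

	printf("%u full buffers + %u tail bytes\n",
	       len / buf_size, len % buf_size);	/* 2 + 44 */
	return 0;
}
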
int nubus_get_root_dir(const struct nubus_board *board,
struct nubus_dir *dir)
{
@@ -199,12 +227,11 @@ int nubus_get_root_dir(const struct nubus_board *board,
EXPORT_SYMBOL(nubus_get_root_dir);
/* This is a slyly renamed version of the above */
-int nubus_get_func_dir(const struct nubus_dev *dev,
- struct nubus_dir *dir)
+int nubus_get_func_dir(const struct nubus_rsrc *fres, struct nubus_dir *dir)
{
- dir->ptr = dir->base = dev->directory;
+ dir->ptr = dir->base = fres->directory;
dir->done = 0;
- dir->mask = dev->board->lanes;
+ dir->mask = fres->board->lanes;
return 0;
}
EXPORT_SYMBOL(nubus_get_func_dir);
@@ -277,51 +304,20 @@ EXPORT_SYMBOL(nubus_rewinddir);
/* Driver interface functions, more or less like in pci.c */
-struct nubus_dev*
-nubus_find_device(unsigned short category, unsigned short type,
- unsigned short dr_hw, unsigned short dr_sw,
- const struct nubus_dev *from)
-{
- struct nubus_dev *itor = from ? from->next : nubus_devices;
-
- while (itor) {
- if (itor->category == category && itor->type == type &&
- itor->dr_hw == dr_hw && itor->dr_sw == dr_sw)
- return itor;
- itor = itor->next;
- }
- return NULL;
-}
-EXPORT_SYMBOL(nubus_find_device);
-
-struct nubus_dev*
-nubus_find_type(unsigned short category, unsigned short type,
- const struct nubus_dev *from)
+struct nubus_rsrc *nubus_first_rsrc_or_null(void)
{
- struct nubus_dev *itor = from ? from->next : nubus_devices;
-
- while (itor) {
- if (itor->category == category && itor->type == type)
- return itor;
- itor = itor->next;
- }
- return NULL;
+ return list_first_entry_or_null(&nubus_func_rsrcs, struct nubus_rsrc,
+ list);
}
-EXPORT_SYMBOL(nubus_find_type);
+EXPORT_SYMBOL(nubus_first_rsrc_or_null);
-struct nubus_dev*
-nubus_find_slot(unsigned int slot, const struct nubus_dev *from)
+struct nubus_rsrc *nubus_next_rsrc_or_null(struct nubus_rsrc *from)
{
- struct nubus_dev *itor = from ? from->next : nubus_devices;
-
- while (itor) {
- if (itor->board->slot == slot)
- return itor;
- itor = itor->next;
- }
- return NULL;
+ if (list_is_last(&from->list, &nubus_func_rsrcs))
+ return NULL;
+ return list_next_entry(from, list);
}
-EXPORT_SYMBOL(nubus_find_slot);
+EXPORT_SYMBOL(nubus_next_rsrc_or_null);
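
[Editor's note] Anything that used the removed finder functions now walks the resource list through these two accessors (in-tree callers mostly do so via the for_each_func_rsrc() helper seen in proc.c below). A sketch of the open-coded equivalent of the removed nubus_find_type():

/* Sketch: replacement for the removed nubus_find_type(), built on
 * the new accessors. */
static struct nubus_rsrc *find_next_of_type(unsigned short category,
					    unsigned short type,
					    struct nubus_rsrc *from)
{
	struct nubus_rsrc *fres;

	fres = from ? nubus_next_rsrc_or_null(from)
		    : nubus_first_rsrc_or_null();
	while (fres) {
		if (fres->category == category && fres->type == type)
			return fres;
		fres = nubus_next_rsrc_or_null(fres);
	}
	return NULL;
}
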
int
nubus_find_rsrc(struct nubus_dir *dir, unsigned char rsrc_type,
@@ -339,31 +335,83 @@ EXPORT_SYMBOL(nubus_find_rsrc);
looking at, and print out lots and lots of information from the
resource blocks. */
-/* FIXME: A lot of this stuff will eventually be useful after
- initialization, for intelligently probing Ethernet and video chips,
- among other things. The rest of it should go in the /proc code.
- For now, we just use it to give verbose boot logs. */
+static int __init nubus_get_block_rsrc_dir(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *parent)
+{
+ struct nubus_dir dir;
+ struct nubus_dirent ent;
+
+ nubus_get_subdir(parent, &dir);
+ dir.procdir = nubus_proc_add_rsrc_dir(procdir, parent, board);
-static int __init nubus_show_display_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+ while (nubus_readdir(&dir, &ent) != -1) {
+ u32 size;
+
+ nubus_get_rsrc_mem(&size, &ent, 4);
+ pr_debug(" block (0x%x), size %d\n", ent.type, size);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, size);
+ }
+ return 0;
+}
+
+static int __init nubus_get_display_vidmode(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *parent)
+{
+ struct nubus_dir dir;
+ struct nubus_dirent ent;
+
+ nubus_get_subdir(parent, &dir);
+ dir.procdir = nubus_proc_add_rsrc_dir(procdir, parent, board);
+
+ while (nubus_readdir(&dir, &ent) != -1) {
+ switch (ent.type) {
+ case 1: /* mVidParams */
+ case 2: /* mTable */
+ {
+ u32 size;
+
+ nubus_get_rsrc_mem(&size, &ent, 4);
+ pr_debug(" block (0x%x), size %d\n", ent.type,
+ size);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, size);
+ break;
+ }
+ default:
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent.type, ent.data);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 0);
+ }
+ }
+ return 0;
+}
+
+static int __init nubus_get_display_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
switch (ent->type) {
case NUBUS_RESID_GAMMADIR:
- pr_info(" gamma directory offset: 0x%06x\n", ent->data);
+ pr_debug(" gamma directory offset: 0x%06x\n", ent->data);
+ nubus_get_block_rsrc_dir(fres->board, procdir, ent);
break;
case 0x0080 ... 0x0085:
- pr_info(" mode %02X info offset: 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" mode 0x%02x info offset: 0x%06x\n",
+ ent->type, ent->data);
+ nubus_get_display_vidmode(fres->board, procdir, ent);
break;
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static int __init nubus_show_network_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+static int __init nubus_get_network_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
switch (ent->type) {
case NUBUS_RESID_MAC_ADDRESS:
@@ -371,18 +419,21 @@ static int __init nubus_show_network_resource(struct nubus_dev *dev,
char addr[6];
nubus_get_rsrc_mem(addr, ent, 6);
- pr_info(" MAC address: %pM\n", addr);
+ pr_debug(" MAC address: %pM\n", addr);
+ nubus_proc_add_rsrc_mem(procdir, ent, 6);
break;
}
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static int __init nubus_show_cpu_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+static int __init nubus_get_cpu_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
switch (ent->type) {
case NUBUS_RESID_MEMINFO:
@@ -390,8 +441,9 @@ static int __init nubus_show_cpu_resource(struct nubus_dev *dev,
unsigned long meminfo[2];
nubus_get_rsrc_mem(&meminfo, ent, 8);
- pr_info(" memory: [ 0x%08lx 0x%08lx ]\n",
- meminfo[0], meminfo[1]);
+ pr_debug(" memory: [ 0x%08lx 0x%08lx ]\n",
+ meminfo[0], meminfo[1]);
+ nubus_proc_add_rsrc_mem(procdir, ent, 8);
break;
}
case NUBUS_RESID_ROMINFO:
@@ -399,57 +451,60 @@ static int __init nubus_show_cpu_resource(struct nubus_dev *dev,
unsigned long rominfo[2];
nubus_get_rsrc_mem(&rominfo, ent, 8);
- pr_info(" ROM: [ 0x%08lx 0x%08lx ]\n",
- rominfo[0], rominfo[1]);
+ pr_debug(" ROM: [ 0x%08lx 0x%08lx ]\n",
+ rominfo[0], rominfo[1]);
+ nubus_proc_add_rsrc_mem(procdir, ent, 8);
break;
}
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static int __init nubus_show_private_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+static int __init nubus_get_private_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
- switch (dev->category) {
+ switch (fres->category) {
case NUBUS_CAT_DISPLAY:
- nubus_show_display_resource(dev, ent);
+ nubus_get_display_resource(fres, procdir, ent);
break;
case NUBUS_CAT_NETWORK:
- nubus_show_network_resource(dev, ent);
+ nubus_get_network_resource(fres, procdir, ent);
break;
case NUBUS_CAT_CPU:
- nubus_show_cpu_resource(dev, ent);
+ nubus_get_cpu_resource(fres, procdir, ent);
break;
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static struct nubus_dev * __init
+static struct nubus_rsrc * __init
nubus_get_functional_resource(struct nubus_board *board, int slot,
const struct nubus_dirent *parent)
{
struct nubus_dir dir;
struct nubus_dirent ent;
- struct nubus_dev *dev;
+ struct nubus_rsrc *fres;
- pr_info(" Function 0x%02x:\n", parent->type);
+ pr_debug(" Functional resource 0x%02x:\n", parent->type);
nubus_get_subdir(parent, &dir);
-
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
+ dir.procdir = nubus_proc_add_rsrc_dir(board->procdir, parent, board);
/* Actually we should probably panic if this fails */
- if ((dev = kzalloc(sizeof(*dev), GFP_ATOMIC)) == NULL)
+ fres = kzalloc(sizeof(*fres), GFP_ATOMIC);
+ if (!fres)
return NULL;
- dev->resid = parent->type;
- dev->directory = dir.base;
- dev->board = board;
+ fres->resid = parent->type;
+ fres->directory = dir.base;
+ fres->board = board;
while (nubus_readdir(&dir, &ent) != -1) {
switch (ent.type) {
@@ -458,130 +513,96 @@ nubus_get_functional_resource(struct nubus_board *board, int slot,
unsigned short nbtdata[4];
nubus_get_rsrc_mem(nbtdata, &ent, 8);
- dev->category = nbtdata[0];
- dev->type = nbtdata[1];
- dev->dr_sw = nbtdata[2];
- dev->dr_hw = nbtdata[3];
- pr_info(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
- nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
+ fres->category = nbtdata[0];
+ fres->type = nbtdata[1];
+ fres->dr_sw = nbtdata[2];
+ fres->dr_hw = nbtdata[3];
+ pr_debug(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
+ nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 8);
break;
}
case NUBUS_RESID_NAME:
{
- nubus_get_rsrc_str(dev->name, &ent, 64);
- pr_info(" name: %s\n", dev->name);
+ char name[64];
+ unsigned int len;
+
+ len = nubus_get_rsrc_str(name, &ent, sizeof(name));
+ pr_debug(" name: %s\n", name);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1);
break;
}
case NUBUS_RESID_DRVRDIR:
{
/* MacOS driver. If we were NetBSD we might
use this :-) */
- struct nubus_dir drvr_dir;
- struct nubus_dirent drvr_ent;
-
- nubus_get_subdir(&ent, &drvr_dir);
- nubus_readdir(&drvr_dir, &drvr_ent);
- dev->driver = nubus_dirptr(&drvr_ent);
- pr_info(" driver at: 0x%p\n", dev->driver);
+ pr_debug(" driver directory offset: 0x%06x\n",
+ ent.data);
+ nubus_get_block_rsrc_dir(board, dir.procdir, &ent);
break;
}
case NUBUS_RESID_MINOR_BASEOS:
+ {
/* We will need this in order to support
multiple framebuffers. It might be handy
for Ethernet as well */
- nubus_get_rsrc_mem(&dev->iobase, &ent, 4);
- pr_info(" memory offset: 0x%08lx\n", dev->iobase);
+ u32 base_offset;
+
+ nubus_get_rsrc_mem(&base_offset, &ent, 4);
+ pr_debug(" memory offset: 0x%08x\n", base_offset);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 4);
break;
+ }
case NUBUS_RESID_MINOR_LENGTH:
+ {
/* Ditto */
- nubus_get_rsrc_mem(&dev->iosize, &ent, 4);
- pr_info(" memory length: 0x%08lx\n", dev->iosize);
+ u32 length;
+
+ nubus_get_rsrc_mem(&length, &ent, 4);
+ pr_debug(" memory length: 0x%08x\n", length);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 4);
break;
+ }
case NUBUS_RESID_FLAGS:
- dev->flags = ent.data;
- pr_info(" flags: 0x%06x\n", dev->flags);
+ pr_debug(" flags: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_HWDEVID:
- dev->hwdevid = ent.data;
- pr_info(" hwdevid: 0x%06x\n", dev->hwdevid);
+ pr_debug(" hwdevid: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
default:
/* Local/Private resources have their own
function */
- nubus_show_private_resource(dev, &ent);
+ nubus_get_private_resource(fres, dir.procdir, &ent);
}
}
- return dev;
-}
-
-/* This is cool. */
-static int __init nubus_get_vidnames(struct nubus_board *board,
- const struct nubus_dirent *parent)
-{
- struct nubus_dir dir;
- struct nubus_dirent ent;
-
- /* FIXME: obviously we want to put this in a header file soon */
- struct vidmode {
- u32 size;
- /* Don't know what this is yet */
- u16 id;
- /* Longest one I've seen so far is 26 characters */
- char name[32];
- };
-
- pr_info(" video modes supported:\n");
- nubus_get_subdir(parent, &dir);
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
-
- while (nubus_readdir(&dir, &ent) != -1) {
- struct vidmode mode;
- u32 size;
-
- /* First get the length */
- nubus_get_rsrc_mem(&size, &ent, 4);
-
- /* Now clobber the whole thing */
- if (size > sizeof(mode) - 1)
- size = sizeof(mode) - 1;
- memset(&mode, 0, sizeof(mode));
- nubus_get_rsrc_mem(&mode, &ent, size);
- pr_info(" %02X: (%02X) %s\n", ent.type,
- mode.id, mode.name);
- }
- return 0;
+ return fres;
}
/* This is *really* cool. */
static int __init nubus_get_icon(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
const struct nubus_dirent *ent)
{
/* Should be 32x32 if my memory serves me correctly */
- unsigned char icon[128];
- int x, y;
+ u32 icon[32];
+ int i;
nubus_get_rsrc_mem(&icon, ent, 128);
- pr_info(" icon:\n");
-
- /* We should actually plot these somewhere in the framebuffer
- init. This is just to demonstrate that they do, in fact,
- exist */
- for (y = 0; y < 32; y++) {
- pr_info(" ");
- for (x = 0; x < 32; x++) {
- if (icon[y * 4 + x / 8] & (0x80 >> (x % 8)))
- pr_cont("*");
- else
- pr_cont(" ");
- }
- pr_cont("\n");
- }
+ pr_debug(" icon:\n");
+ for (i = 0; i < 8; i++)
+ pr_debug(" %08x %08x %08x %08x\n",
+ icon[i * 4 + 0], icon[i * 4 + 1],
+ icon[i * 4 + 2], icon[i * 4 + 3]);
+ nubus_proc_add_rsrc_mem(procdir, ent, 128);
+
return 0;
}
static int __init nubus_get_vendorinfo(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
const struct nubus_dirent *parent)
{
struct nubus_dir dir;
@@ -589,19 +610,20 @@ static int __init nubus_get_vendorinfo(struct nubus_board *board,
static char *vendor_fields[6] = { "ID", "serial", "revision",
"part", "date", "unknown field" };
- pr_info(" vendor info:\n");
+ pr_debug(" vendor info:\n");
nubus_get_subdir(parent, &dir);
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
+ dir.procdir = nubus_proc_add_rsrc_dir(procdir, parent, board);
while (nubus_readdir(&dir, &ent) != -1) {
char name[64];
+ unsigned int len;
/* These are all strings, we think */
- nubus_get_rsrc_str(name, &ent, 64);
- if (ent.type > 5)
+ len = nubus_get_rsrc_str(name, &ent, sizeof(name));
+ if (ent.type < 1 || ent.type > 5)
ent.type = 5;
- pr_info(" %s: %s\n", vendor_fields[ent.type - 1], name);
+ pr_debug(" %s: %s\n", vendor_fields[ent.type - 1], name);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1);
}
return 0;
}
@@ -612,9 +634,9 @@ static int __init nubus_get_board_resource(struct nubus_board *board, int slot,
struct nubus_dir dir;
struct nubus_dirent ent;
+ pr_debug(" Board resource 0x%02x:\n", parent->type);
nubus_get_subdir(parent, &dir);
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
+ dir.procdir = nubus_proc_add_rsrc_dir(board->procdir, parent, board);
while (nubus_readdir(&dir, &ent) != -1) {
switch (ent.type) {
@@ -625,64 +647,81 @@ static int __init nubus_get_board_resource(struct nubus_board *board, int slot,
useful except insofar as it tells us that
we really are looking at a board resource. */
nubus_get_rsrc_mem(nbtdata, &ent, 8);
- pr_info(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
- nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
+ pr_debug(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
+ nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
if (nbtdata[0] != 1 || nbtdata[1] != 0 ||
nbtdata[2] != 0 || nbtdata[3] != 0)
- pr_err("this sResource is not a board resource!\n");
+ pr_err("Slot %X: sResource is not a board resource!\n",
+ slot);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 8);
break;
}
case NUBUS_RESID_NAME:
- nubus_get_rsrc_str(board->name, &ent, 64);
- pr_info(" name: %s\n", board->name);
+ {
+ unsigned int len;
+
+ len = nubus_get_rsrc_str(board->name, &ent,
+ sizeof(board->name));
+ pr_debug(" name: %s\n", board->name);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1);
break;
+ }
case NUBUS_RESID_ICON:
- nubus_get_icon(board, &ent);
+ nubus_get_icon(board, dir.procdir, &ent);
break;
case NUBUS_RESID_BOARDID:
- pr_info(" board id: 0x%x\n", ent.data);
+ pr_debug(" board id: 0x%x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_PRIMARYINIT:
- pr_info(" primary init offset: 0x%06x\n", ent.data);
+ pr_debug(" primary init offset: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_VENDORINFO:
- nubus_get_vendorinfo(board, &ent);
+ nubus_get_vendorinfo(board, dir.procdir, &ent);
break;
case NUBUS_RESID_FLAGS:
- pr_info(" flags: 0x%06x\n", ent.data);
+ pr_debug(" flags: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_HWDEVID:
- pr_info(" hwdevid: 0x%06x\n", ent.data);
+ pr_debug(" hwdevid: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_SECONDINIT:
- pr_info(" secondary init offset: 0x%06x\n", ent.data);
+ pr_debug(" secondary init offset: 0x%06x\n",
+ ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
/* WTF isn't this in the functional resources? */
case NUBUS_RESID_VIDNAMES:
- nubus_get_vidnames(board, &ent);
+ pr_debug(" vidnames directory offset: 0x%06x\n",
+ ent.data);
+ nubus_get_block_rsrc_dir(board, dir.procdir, &ent);
break;
/* Same goes for this */
case NUBUS_RESID_VIDMODES:
- pr_info(" video mode parameter directory offset: 0x%06x\n",
- ent.data);
+ pr_debug(" video mode parameter directory offset: 0x%06x\n",
+ ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent.type, ent.data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent.type, ent.data);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 0);
}
}
return 0;
}
-/* Add a board (might be many devices) to the list */
-static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
+static void __init nubus_add_board(int slot, int bytelanes)
{
struct nubus_board *board;
- struct nubus_board **boardp;
unsigned char *rp;
unsigned long dpat;
struct nubus_dir dir;
struct nubus_dirent ent;
+ int prev_resid = -1;
/* Move to the start of the format block */
rp = nubus_rom_addr(slot);
@@ -690,19 +729,19 @@ static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
/* Actually we should probably panic if this fails */
if ((board = kzalloc(sizeof(*board), GFP_ATOMIC)) == NULL)
- return NULL;
+ return;
board->fblock = rp;
/* Dump the format block for debugging purposes */
pr_debug("Slot %X, format block at 0x%p:\n", slot, rp);
+ pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
+ pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
+ pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
- pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
- pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
- pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
rp = board->fblock;
board->slot = slot;
@@ -722,10 +761,10 @@ static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
/* Directory offset should be small and negative... */
if (!(board->doffset & 0x00FF0000))
- pr_warn("Dodgy doffset!\n");
+ pr_warn("Slot %X: Dodgy doffset!\n", slot);
dpat = nubus_get_rom(&rp, 4, bytelanes);
if (dpat != NUBUS_TEST_PATTERN)
- pr_warn("Wrong test pattern %08lx!\n", dpat);
+ pr_warn("Slot %X: Wrong test pattern %08lx!\n", slot, dpat);
/*
* I wonder how the CRC is meant to work -
@@ -742,53 +781,52 @@ static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
nubus_get_root_dir(board, &dir);
/* We're ready to rock */
- pr_info("Slot %X:\n", slot);
+ pr_debug("Slot %X resources:\n", slot);
/* Each slot should have one board resource and any number of
- functional resources. So we'll fill in some fields in the
- struct nubus_board from the board resource, then walk down
- the list of functional resources, spinning out a nubus_dev
- for each of them. */
+ * functional resources. So we'll fill in some fields in the
+ * struct nubus_board from the board resource, then walk down
+ * the list of functional resources, spinning out a nubus_rsrc
+ * for each of them.
+ */
if (nubus_readdir(&dir, &ent) == -1) {
/* We can't have this! */
- pr_err("Board resource not found!\n");
- return NULL;
- } else {
- pr_info(" Board resource:\n");
- nubus_get_board_resource(board, slot, &ent);
+ pr_err("Slot %X: Board resource not found!\n", slot);
+ kfree(board);
+ return;
}
+ if (ent.type < 1 || ent.type > 127)
+ pr_warn("Slot %X: Board resource ID is invalid!\n", slot);
+
+ board->procdir = nubus_proc_add_board(board);
+
+ nubus_get_board_resource(board, slot, &ent);
+
while (nubus_readdir(&dir, &ent) != -1) {
- struct nubus_dev *dev;
- struct nubus_dev **devp;
+ struct nubus_rsrc *fres;
- dev = nubus_get_functional_resource(board, slot, &ent);
- if (dev == NULL)
+ fres = nubus_get_functional_resource(board, slot, &ent);
+ if (fres == NULL)
continue;
- /* We zeroed this out above */
- if (board->first_dev == NULL)
- board->first_dev = dev;
+ /* Resources should appear in ascending ID order. This sanity
+ * check prevents duplicate resource IDs.
+ */
+ if (fres->resid <= prev_resid) {
+ kfree(fres);
+ continue;
+ }
+ prev_resid = fres->resid;
- /* Put it on the global NuBus device chain. Keep entries in order. */
- for (devp = &nubus_devices; *devp != NULL;
- devp = &((*devp)->next))
- /* spin */;
- *devp = dev;
- dev->next = NULL;
+ list_add_tail(&fres->list, &nubus_func_rsrcs);
}
- /* Put it on the global NuBus board chain. Keep entries in order. */
- for (boardp = &nubus_boards; *boardp != NULL;
- boardp = &((*boardp)->next))
- /* spin */;
- *boardp = board;
- board->next = NULL;
-
- return board;
+ if (nubus_device_register(board))
+ put_device(&board->dev);
}
-void __init nubus_probe_slot(int slot)
+static void __init nubus_probe_slot(int slot)
{
unsigned char dp;
unsigned char *rp;
@@ -796,11 +834,8 @@ void __init nubus_probe_slot(int slot)
rp = nubus_rom_addr(slot);
for (i = 4; i; i--) {
- int card_present;
-
rp--;
- card_present = hwreg_present(rp);
- if (!card_present)
+ if (!hwreg_present(rp))
continue;
dp = *rp;
@@ -822,10 +857,11 @@ void __init nubus_probe_slot(int slot)
}
}
-void __init nubus_scan_bus(void)
+static void __init nubus_scan_bus(void)
{
int slot;
+ pr_info("NuBus: Scanning NuBus slots.\n");
for (slot = 9; slot < 15; slot++) {
nubus_probe_slot(slot);
}
@@ -833,14 +869,16 @@ void __init nubus_scan_bus(void)
static int __init nubus_init(void)
{
+ int err;
+
if (!MACH_IS_MAC)
return 0;
- pr_info("NuBus: Scanning NuBus slots.\n");
- nubus_devices = NULL;
- nubus_boards = NULL;
- nubus_scan_bus();
nubus_proc_init();
+ err = nubus_bus_register();
+ if (err)
+ return err;
+ nubus_scan_bus();
return 0;
}
diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c
index 004a122..c2e5a7e 100644
--- a/drivers/nubus/proc.c
+++ b/drivers/nubus/proc.c
@@ -11,39 +11,37 @@
structure in /proc analogous to the structure of the NuBus ROM
resources.
- Therefore each NuBus device is in fact a directory, which may in
- turn contain subdirectories. The "files" correspond to NuBus
- resource records. For those types of records which we know how to
- convert to formats that are meaningful to userspace (mostly just
- icons) these files will provide "cooked" data. Otherwise they will
- simply provide raw access (read-only of course) to the ROM. */
+ Therefore each board function gets a directory, which may in turn
+ contain subdirectories. Each slot resource is a file. Unrecognized
+ resources are empty files, since every resource ID requires a special
+ case (e.g. if the resource ID implies a directory or block, then its
+ value has to be interpreted as a slot ROM pointer etc.).
+ */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/nubus.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
-
#include <linux/uaccess.h>
#include <asm/byteorder.h>
+/*
+ * /proc/bus/nubus/devices stuff
+ */
+
static int
nubus_devices_proc_show(struct seq_file *m, void *v)
{
- struct nubus_dev *dev = nubus_devices;
-
- while (dev) {
- seq_printf(m, "%x\t%04x %04x %04x %04x",
- dev->board->slot,
- dev->category,
- dev->type,
- dev->dr_sw,
- dev->dr_hw);
- seq_printf(m, "\t%08lx\n", dev->board->slot_addr);
- dev = dev->next;
- }
+ struct nubus_rsrc *fres;
+
+ for_each_func_rsrc(fres)
+ seq_printf(m, "%x\t%04x %04x %04x %04x\t%08lx\n",
+ fres->board->slot, fres->category, fres->type,
+ fres->dr_sw, fres->dr_hw, fres->board->slot_addr);
return 0;
}
@@ -61,174 +59,163 @@ static const struct file_operations nubus_devices_proc_fops = {
static struct proc_dir_entry *proc_bus_nubus_dir;
-static const struct file_operations nubus_proc_subdir_fops = {
-#warning Need to set some I/O handlers here
-};
+/*
+ * /proc/bus/nubus/x/ stuff
+ */
-static void nubus_proc_subdir(struct nubus_dev* dev,
- struct proc_dir_entry* parent,
- struct nubus_dir* dir)
+struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board)
{
- struct nubus_dirent ent;
-
- /* Some of these are directories, others aren't */
- while (nubus_readdir(dir, &ent) != -1) {
- char name[8];
- struct proc_dir_entry* e;
-
- sprintf(name, "%x", ent.type);
- e = proc_create(name, S_IFREG | S_IRUGO | S_IWUSR, parent,
- &nubus_proc_subdir_fops);
- if (!e)
- return;
- }
+ char name[2];
+
+ if (!proc_bus_nubus_dir)
+ return NULL;
+ snprintf(name, sizeof(name), "%x", board->slot);
+ return proc_mkdir(name, proc_bus_nubus_dir);
}
-/* Can't do this recursively since the root directory is structured
- somewhat differently from the subdirectories */
-static void nubus_proc_populate(struct nubus_dev* dev,
- struct proc_dir_entry* parent,
- struct nubus_dir* root)
+/* The PDE private data for any directory under /proc/bus/nubus/x/
+ * is the bytelanes value for the board in slot x.
+ */
+
+struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ struct nubus_board *board)
{
- struct nubus_dirent ent;
-
- /* We know these are all directories (board resource + one or
- more functional resources) */
- while (nubus_readdir(root, &ent) != -1) {
- char name[8];
- struct proc_dir_entry* e;
- struct nubus_dir dir;
-
- sprintf(name, "%x", ent.type);
- e = proc_mkdir(name, parent);
- if (!e) return;
-
- /* And descend */
- if (nubus_get_subdir(&ent, &dir) == -1) {
- /* This shouldn't happen */
- printk(KERN_ERR "NuBus root directory node %x:%x has no subdir!\n",
- dev->board->slot, ent.type);
- continue;
- } else {
- nubus_proc_subdir(dev, e, &dir);
- }
- }
+ char name[9];
+ int lanes = board->lanes;
+
+ if (!procdir)
+ return NULL;
+ snprintf(name, sizeof(name), "%x", ent->type);
+ return proc_mkdir_data(name, 0555, procdir, (void *)lanes);
}
-int nubus_proc_attach_device(struct nubus_dev *dev)
+/* The PDE private data for a file under /proc/bus/nubus/x/ is a pointer to
+ * an instance of the following structure, which gives the location and size
+ * of the resource data in the slot ROM. For slot resources which hold only a
+ * small integer, this integer value is stored directly and size is set to 0.
+ * A NULL private data pointer indicates an unrecognized resource.
+ */
+
+struct nubus_proc_pde_data {
+ unsigned char *res_ptr;
+ unsigned int res_size;
+};
+
+static struct nubus_proc_pde_data *
+nubus_proc_alloc_pde_data(unsigned char *ptr, unsigned int size)
{
- struct proc_dir_entry *e;
- struct nubus_dir root;
- char name[8];
-
- if (dev == NULL) {
- printk(KERN_ERR
- "NULL pointer in nubus_proc_attach_device, shoot the programmer!\n");
- return -1;
- }
-
- if (dev->board == NULL) {
- printk(KERN_ERR
- "NULL pointer in nubus_proc_attach_device, shoot the programmer!\n");
- printk("dev = %p, dev->board = %p\n", dev, dev->board);
- return -1;
- }
-
- /* Create a directory */
- sprintf(name, "%x", dev->board->slot);
- e = dev->procdir = proc_mkdir(name, proc_bus_nubus_dir);
- if (!e)
- return -ENOMEM;
+ struct nubus_proc_pde_data *pde_data;
- /* Now recursively populate it with files */
- nubus_get_root_dir(dev->board, &root);
- nubus_proc_populate(dev, e, &root);
+ pde_data = kmalloc(sizeof(*pde_data), GFP_KERNEL);
+ if (!pde_data)
+ return NULL;
- return 0;
+ pde_data->res_ptr = ptr;
+ pde_data->res_size = size;
+ return pde_data;
}
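
[Editor's note] As the comment above states, a small-integer resource stores its value directly in res_ptr with res_size == 0, and the show callback then emits the 24-bit sResource data field as three bytes, most significant first. A standalone model of that round trip:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int data = 0x0206a5;	/* 24-bit sResource data field */
	unsigned char out[3];

	/* What the show callback writes, one seq_putc() per byte: */
	out[0] = data >> 16;
	out[1] = data >> 8;
	out[2] = data >> 0;
	assert((out[0] << 16 | out[1] << 8 | out[2]) == data);
	printf("%02x %02x %02x\n", out[0], out[1], out[2]);
	return 0;
}
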
-EXPORT_SYMBOL(nubus_proc_attach_device);
-/*
- * /proc/nubus stuff
- */
-static int nubus_proc_show(struct seq_file *m, void *v)
+static int nubus_proc_rsrc_show(struct seq_file *m, void *v)
{
- const struct nubus_board *board = v;
+ struct inode *inode = m->private;
+ struct nubus_proc_pde_data *pde_data;
- /* Display header on line 1 */
- if (v == SEQ_START_TOKEN)
- seq_puts(m, "Nubus devices found:\n");
- else
- seq_printf(m, "Slot %X: %s\n", board->slot, board->name);
+ pde_data = PDE_DATA(inode);
+ if (!pde_data)
+ return 0;
+
+ if (pde_data->res_size > m->size)
+ return -EFBIG;
+
+ if (pde_data->res_size) {
+ int lanes = (int)proc_get_parent_data(inode);
+ struct nubus_dirent ent;
+
+ if (!lanes)
+ return 0;
+
+ ent.mask = lanes;
+ ent.base = pde_data->res_ptr;
+ ent.data = 0;
+ nubus_seq_write_rsrc_mem(m, &ent, pde_data->res_size);
+ } else {
+ unsigned int data = (unsigned int)pde_data->res_ptr;
+
+ seq_putc(m, data >> 16);
+ seq_putc(m, data >> 8);
+ seq_putc(m, data >> 0);
+ }
return 0;
}
-static void *nubus_proc_start(struct seq_file *m, loff_t *_pos)
+static int nubus_proc_rsrc_open(struct inode *inode, struct file *file)
{
- struct nubus_board *board;
- unsigned pos;
-
- if (*_pos > LONG_MAX)
- return NULL;
- pos = *_pos;
- if (pos == 0)
- return SEQ_START_TOKEN;
- for (board = nubus_boards; board; board = board->next)
- if (--pos == 0)
- break;
- return board;
+ return single_open(file, nubus_proc_rsrc_show, inode);
}
-static void *nubus_proc_next(struct seq_file *p, void *v, loff_t *_pos)
+static const struct file_operations nubus_proc_rsrc_fops = {
+ .open = nubus_proc_rsrc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ unsigned int size)
{
- /* Walk the list of NuBus boards */
- struct nubus_board *board = v;
-
- ++*_pos;
- if (v == SEQ_START_TOKEN)
- board = nubus_boards;
- else if (board)
- board = board->next;
- return board;
+ char name[9];
+ struct nubus_proc_pde_data *pde_data;
+
+ if (!procdir)
+ return;
+
+ snprintf(name, sizeof(name), "%x", ent->type);
+ if (size)
+ pde_data = nubus_proc_alloc_pde_data(nubus_dirptr(ent), size);
+ else
+ pde_data = NULL;
+ proc_create_data(name, S_IFREG | 0444, procdir,
+ &nubus_proc_rsrc_fops, pde_data);
}
-static void nubus_proc_stop(struct seq_file *p, void *v)
+void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
+ char name[9];
+ unsigned char *data = (unsigned char *)ent->data;
+
+ if (!procdir)
+ return;
+
+ snprintf(name, sizeof(name), "%x", ent->type);
+ proc_create_data(name, S_IFREG | 0444, procdir,
+ &nubus_proc_rsrc_fops,
+ nubus_proc_alloc_pde_data(data, 0));
}
-static const struct seq_operations nubus_proc_seqops = {
- .start = nubus_proc_start,
- .next = nubus_proc_next,
- .stop = nubus_proc_stop,
- .show = nubus_proc_show,
-};
+/*
+ * /proc/nubus stuff
+ */
static int nubus_proc_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &nubus_proc_seqops);
+ return single_open(file, nubus_proc_show, NULL);
}
static const struct file_operations nubus_proc_fops = {
.open = nubus_proc_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = single_release,
};
-void __init proc_bus_nubus_add_devices(void)
-{
- struct nubus_dev *dev;
-
- for(dev = nubus_devices; dev; dev = dev->next)
- nubus_proc_attach_device(dev);
-}
-
void __init nubus_proc_init(void)
{
proc_create("nubus", 0, NULL, &nubus_proc_fops);
- if (!MACH_IS_MAC)
- return;
proc_bus_nubus_dir = proc_mkdir("bus/nubus", NULL);
+ if (!proc_bus_nubus_dir)
+ return;
proc_create("devices", 0, proc_bus_nubus_dir, &nubus_devices_proc_fops);
- proc_bus_nubus_add_devices();
}
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index e949e33..c586bcd 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -211,12 +211,12 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
return ret;
}
-static int btt_log_read_pair(struct arena_info *arena, u32 lane,
- struct log_entry *ent)
+static int btt_log_group_read(struct arena_info *arena, u32 lane,
+ struct log_group *log)
{
return arena_read_bytes(arena,
- arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
- 2 * LOG_ENT_SIZE, 0);
+ arena->logoff + (lane * LOG_GRP_SIZE), log,
+ LOG_GRP_SIZE, 0);
}
static struct dentry *debugfs_root;
@@ -256,6 +256,8 @@ static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
+ debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
+ debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
}
static void btt_debugfs_init(struct btt *btt)
@@ -274,6 +276,11 @@ static void btt_debugfs_init(struct btt *btt)
}
}
+static u32 log_seq(struct log_group *log, int log_idx)
+{
+ return le32_to_cpu(log->ent[log_idx].seq);
+}
+
/*
* This function accepts two log entries, and uses the
* sequence number to find the 'older' entry.
@@ -283,8 +290,10 @@ static void btt_debugfs_init(struct btt *btt)
*
* TODO The logic feels a bit kludge-y. make it better..
*/
-static int btt_log_get_old(struct log_entry *ent)
+static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
+ int idx0 = a->log_index[0];
+ int idx1 = a->log_index[1];
int old;
/*
@@ -292,23 +301,23 @@ static int btt_log_get_old(struct log_entry *ent)
* the next time, the following logic works out to put this
* (next) entry into [1]
*/
- if (ent[0].seq == 0) {
- ent[0].seq = cpu_to_le32(1);
+ if (log_seq(log, idx0) == 0) {
+ log->ent[idx0].seq = cpu_to_le32(1);
return 0;
}
- if (ent[0].seq == ent[1].seq)
+ if (log_seq(log, idx0) == log_seq(log, idx1))
return -EINVAL;
- if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5)
+ if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
return -EINVAL;
- if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) {
- if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1)
+ if (log_seq(log, idx0) < log_seq(log, idx1)) {
+ if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
old = 0;
else
old = 1;
} else {
- if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1)
+ if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
old = 1;
else
old = 0;
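
[Editor's note] The sequence numbers cycle 1 → 2 → 3 → 1, so "older" cannot be read off a plain comparison: a difference of 1 means the larger value is newer, anything else means the counter wrapped. The rewrite only changes how the two entries are addressed (via arena->log_index); the arithmetic is untouched. A standalone model, leaving out the seq == 0 initialization special case:

#include <stdio.h>

/* Which slot holds the older entry for a (seq0, seq1) pair?
 * Returns -1 for pairs the kernel rejects with -EINVAL. */
static int older(unsigned int s0, unsigned int s1)
{
	if (s0 == s1 || s0 + s1 > 5)
		return -1;
	if (s0 < s1)
		return (s1 - s0 == 1) ? 0 : 1;
	return (s0 - s1 == 1) ? 1 : 0;
}

int main(void)
{
	/* (1,2)->0 (2,1)->1 (2,3)->0 (3,2)->1 (3,1)->0 (1,3)->1 */
	printf("%d %d %d %d %d %d\n", older(1, 2), older(2, 1),
	       older(2, 3), older(3, 2), older(3, 1), older(1, 3));
	return 0;
}
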
@@ -328,17 +337,18 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
{
int ret;
int old_ent, ret_ent;
- struct log_entry log[2];
+ struct log_group log;
- ret = btt_log_read_pair(arena, lane, log);
+ ret = btt_log_group_read(arena, lane, &log);
if (ret)
return -EIO;
- old_ent = btt_log_get_old(log);
+ old_ent = btt_log_get_old(arena, &log);
if (old_ent < 0 || old_ent > 1) {
dev_err(to_dev(arena),
"log corruption (%d): lane %d seq [%d, %d]\n",
- old_ent, lane, log[0].seq, log[1].seq);
+ old_ent, lane, log.ent[arena->log_index[0]].seq,
+ log.ent[arena->log_index[1]].seq);
/* TODO set error state? */
return -EIO;
}
@@ -346,7 +356,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
ret_ent = (old_flag ? old_ent : (1 - old_ent));
if (ent != NULL)
- memcpy(ent, &log[ret_ent], LOG_ENT_SIZE);
+ memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);
return ret_ent;
}
@@ -360,17 +370,13 @@ static int __btt_log_write(struct arena_info *arena, u32 lane,
u32 sub, struct log_entry *ent, unsigned long flags)
{
int ret;
- /*
- * Ignore the padding in log_entry for calculating log_half.
- * The entry is 'committed' when we write the sequence number,
- * and we want to ensure that that is the last thing written.
- * We don't bother writing the padding as that would be extra
- * media wear and write amplification
- */
- unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2;
- u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE);
+ u32 group_slot = arena->log_index[sub];
+ unsigned int log_half = LOG_ENT_SIZE / 2;
void *src = ent;
+ u64 ns_off;
+ ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
+ (group_slot * LOG_ENT_SIZE);
/* split the 16B write into atomic, durable halves */
ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
if (ret)
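
[Editor's note] With the 16-byte log_entry, slot group_slot of a lane's group lives at logoff + lane * LOG_GRP_SIZE + group_slot * LOG_ENT_SIZE. The pre-4.15 code addressed 32-byte entries (padding carried inline) at logoff + ((2 * lane) + sub) * 32, which is byte-for-byte identical to slots (0, 2) of the new 64-byte group — exactly the old padding scheme the detection code has to accommodate. A quick equivalence check:

#include <assert.h>
#include <stdio.h>

#define LOG_ENT_SIZE	16u	/* new struct log_entry */
#define LOG_GRP_SIZE	64u	/* struct log_group: ent[4] */
#define OLD_ENT_SIZE	32u	/* pre-4.15 entry incl. inline padding */

int main(void)
{
	unsigned int lane, sub, idx_old[2] = { 0, 2 };

	for (lane = 0; lane < 256; lane++)
		for (sub = 0; sub < 2; sub++)
			assert(lane * LOG_GRP_SIZE +
			       idx_old[sub] * LOG_ENT_SIZE ==
			       (2 * lane + sub) * OLD_ENT_SIZE);
	printf("old layout == slots (0, 2) of the new log_group\n");
	return 0;
}
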
@@ -453,7 +459,7 @@ static int btt_log_init(struct arena_info *arena)
{
size_t logsize = arena->info2off - arena->logoff;
size_t chunk_size = SZ_4K, offset = 0;
- struct log_entry log;
+ struct log_entry ent;
void *zerobuf;
int ret;
u32 i;
@@ -485,11 +491,11 @@ static int btt_log_init(struct arena_info *arena)
}
for (i = 0; i < arena->nfree; i++) {
- log.lba = cpu_to_le32(i);
- log.old_map = cpu_to_le32(arena->external_nlba + i);
- log.new_map = cpu_to_le32(arena->external_nlba + i);
- log.seq = cpu_to_le32(LOG_SEQ_INIT);
- ret = __btt_log_write(arena, i, 0, &log, 0);
+ ent.lba = cpu_to_le32(i);
+ ent.old_map = cpu_to_le32(arena->external_nlba + i);
+ ent.new_map = cpu_to_le32(arena->external_nlba + i);
+ ent.seq = cpu_to_le32(LOG_SEQ_INIT);
+ ret = __btt_log_write(arena, i, 0, &ent, 0);
if (ret)
goto free;
}
@@ -594,6 +600,123 @@ static int btt_freelist_init(struct arena_info *arena)
return 0;
}
+static bool ent_is_padding(struct log_entry *ent)
+{
+ return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
+ && (ent->seq == 0);
+}
+
+/*
+ * Detecting valid log indices: We read a log group (see the comments in btt.h
+ * for a description of a 'log_group' and its 'slots'), and iterate over its
+ * four slots. We expect that a padding slot will be all-zeroes, and use this
+ * to detect a padding slot vs. an actual entry.
+ *
+ * If a log_group is in the initial state, i.e. hasn't been used since the
+ * creation of this BTT layout, it will have three of the four slots with
+ * zeroes. We skip over these log_groups for the detection of log_index. If
+ * all log_groups are in the initial state (i.e. the BTT has never been
+ * written to), it is safe to assume the 'new format' of log entries in slots
+ * (0, 1).
+ */
+static int log_set_indices(struct arena_info *arena)
+{
+ bool idx_set = false, initial_state = true;
+ int ret, log_index[2] = {-1, -1};
+ u32 i, j, next_idx = 0;
+ struct log_group log;
+ u32 pad_count = 0;
+
+ for (i = 0; i < arena->nfree; i++) {
+ ret = btt_log_group_read(arena, i, &log);
+ if (ret < 0)
+ return ret;
+
+ for (j = 0; j < 4; j++) {
+ if (!idx_set) {
+ if (ent_is_padding(&log.ent[j])) {
+ pad_count++;
+ continue;
+ } else {
+ /* Skip if index has been recorded */
+ if ((next_idx == 1) &&
+ (j == log_index[0]))
+ continue;
+ /* valid entry, record index */
+ log_index[next_idx] = j;
+ next_idx++;
+ }
+ if (next_idx == 2) {
+ /* two valid entries found */
+ idx_set = true;
+ } else if (next_idx > 2) {
+ /* too many valid indices */
+ return -ENXIO;
+ }
+ } else {
+ /*
+ * once the indices have been set, just verify
+ * that all subsequent log groups are either in
+ * their initial state or follow the same
+ * indices.
+ */
+ if (j == log_index[0]) {
+ /* entry must be 'valid' */
+ if (ent_is_padding(&log.ent[j]))
+ return -ENXIO;
+ } else if (j == log_index[1]) {
+ ;
+ /*
+ * log_index[1] can be padding if the
+ * lane never got used and it is still
+ * in the initial state (three 'padding'
+ * entries)
+ */
+ } else {
+ /* entry must be invalid (padding) */
+ if (!ent_is_padding(&log.ent[j]))
+ return -ENXIO;
+ }
+ }
+ }
+ /*
+ * If any of the log_groups has more than one valid,
+ * non-padding entry, then we are no longer in the
+ * initial_state
+ */
+ if (pad_count < 3)
+ initial_state = false;
+ pad_count = 0;
+ }
+
+ if (!initial_state && !idx_set)
+ return -ENXIO;
+
+ /*
+ * If all the entries in the log were in the initial state,
+ * assume new padding scheme
+ */
+ if (initial_state)
+ log_index[1] = 1;
+
+ /*
+ * Only allow the known permutations of log/padding indices,
+ * i.e. (0, 1), and (0, 2)
+ */
+ if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
+ ; /* known index possibilities */
+ else {
+ dev_err(to_dev(arena), "Found an unknown padding scheme\n");
+ return -ENXIO;
+ }
+
+ arena->log_index[0] = log_index[0];
+ arena->log_index[1] = log_index[1];
+ dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
+ dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
+ return 0;
+}
+
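
[Editor's note] Given these constraints only two outcomes survive: log_index = (0, 1) for the new format and (0, 2) for the old one. A simplified model of the per-group classification (the real code additionally tolerates a group whose second slot was never written):

#include <stdio.h>

/* Model: a group is four slots, nonzero == valid entry. Returns the
 * detected (i0, i1) pair or -1 when the pattern is not permitted. */
static int classify(const int slot[4], int *i0, int *i1)
{
	int idx[2] = { -1, -1 }, n = 0, j;

	for (j = 0; j < 4; j++)
		if (slot[j]) {
			if (n == 2)
				return -1;	/* too many valid entries */
			idx[n++] = j;
		}
	if (n < 2)
		return -1;			/* initial state: skip group */
	*i0 = idx[0];
	*i1 = idx[1];
	return 0;
}

int main(void)
{
	int new_fmt[4] = { 1, 1, 0, 0 }, old_fmt[4] = { 1, 0, 1, 0 };
	int i0, i1;

	if (!classify(new_fmt, &i0, &i1))
		printf("new: (%d, %d)\n", i0, i1);	/* (0, 1) */
	if (!classify(old_fmt, &i0, &i1))
		printf("old: (%d, %d)\n", i0, i1);	/* (0, 2) */
	return 0;
}
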
static int btt_rtt_init(struct arena_info *arena)
{
arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
@@ -650,8 +773,7 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
available -= 2 * BTT_PG_SIZE;
/* The log takes a fixed amount of space based on nfree */
- logsize = roundup(2 * arena->nfree * sizeof(struct log_entry),
- BTT_PG_SIZE);
+ logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
available -= logsize;
/* Calculate optimal split between map and data area */
@@ -668,6 +790,10 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
arena->mapoff = arena->dataoff + datasize;
arena->logoff = arena->mapoff + mapsize;
arena->info2off = arena->logoff + logsize;
+
+ /* Default log indices are (0,1) */
+ arena->log_index[0] = 0;
+ arena->log_index[1] = 1;
return arena;
}
@@ -758,6 +884,13 @@ static int discover_arenas(struct btt *btt)
arena->external_lba_start = cur_nlba;
parse_arena_meta(arena, super, cur_off);
+ ret = log_set_indices(arena);
+ if (ret) {
+ dev_err(to_dev(arena),
+ "Unable to deduce log/padding indices\n");
+ goto out;
+ }
+
mutex_init(&arena->err_lock);
ret = btt_freelist_init(arena);
if (ret)
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h
index 578c205..db3cb6d 100644
--- a/drivers/nvdimm/btt.h
+++ b/drivers/nvdimm/btt.h
@@ -27,6 +27,7 @@
#define MAP_ERR_MASK (1 << MAP_ERR_SHIFT)
#define MAP_LBA_MASK (~((1 << MAP_TRIM_SHIFT) | (1 << MAP_ERR_SHIFT)))
#define MAP_ENT_NORMAL 0xC0000000
+#define LOG_GRP_SIZE sizeof(struct log_group)
#define LOG_ENT_SIZE sizeof(struct log_entry)
#define ARENA_MIN_SIZE (1UL << 24) /* 16 MB */
#define ARENA_MAX_SIZE (1ULL << 39) /* 512 GB */
@@ -50,12 +51,52 @@ enum btt_init_state {
INIT_READY
};
+/*
+ * A log group represents one log 'lane', and consists of four log entries.
+ * Two of the four entries are valid entries, and the remaining two are
+ * padding. Due to an old bug in the padding location, we need to perform a
+ * test to determine the padding scheme being used, and use that scheme
+ * thereafter.
+ *
+ * In kernels prior to 4.15, 'log group' would have actual log entries at
+ * indices (0, 2) and padding at indices (1, 3), whereas the correct/updated
+ * format has log entries at indices (0, 1) and padding at indices (2, 3).
+ *
+ * Old (pre 4.15) format:
+ * +-----------------+-----------------+
+ * | ent[0] | ent[1] |
+ * | 16B | 16B |
+ * | lba/old/new/seq | pad |
+ * +-----------------------------------+
+ * | ent[2] | ent[3] |
+ * | 16B | 16B |
+ * | lba/old/new/seq | pad |
+ * +-----------------+-----------------+
+ *
+ * New format:
+ * +-----------------+-----------------+
+ * | ent[0] | ent[1] |
+ * | 16B | 16B |
+ * | lba/old/new/seq | lba/old/new/seq |
+ * +-----------------------------------+
+ * | ent[2] | ent[3] |
+ * | 16B | 16B |
+ * | pad | pad |
+ * +-----------------+-----------------+
+ *
+ * We detect during start-up which format is in use, and set
+ * arena->log_index[(0, 1)] with the detected format.
+ */
+
struct log_entry {
__le32 lba;
__le32 old_map;
__le32 new_map;
__le32 seq;
- __le64 padding[2];
+};
+
+struct log_group {
+ struct log_entry ent[4];
};
struct btt_sb {
@@ -125,6 +166,8 @@ struct aligned_lock {
* @list: List head for list of arenas
* @debugfs_dir: Debugfs dentry
* @flags: Arena flags - may signify error states.
+ * @err_lock: Mutex for synchronizing error clearing.
+ * @log_index: Indices of the valid log entries in a log_group
*
* arena_info is a per-arena handle. Once an arena is narrowed down for an
* IO, this struct is passed around for the duration of the IO.
@@ -157,6 +200,7 @@ struct arena_info {
/* Arena flags */
u32 flags;
struct mutex err_lock;
+ int log_index[2];
};
/**
@@ -176,6 +220,7 @@ struct arena_info {
* @init_lock: Mutex used for the BTT initialization
* @init_state: Flag describing the initialization state for the BTT
* @num_arenas: Number of arenas in the BTT instance
+ * @phys_bb: Pointer to the namespace's badblocks structure
*/
struct btt {
struct gendisk *btt_disk;
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 65cc171..2adada1 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -364,9 +364,9 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
u64 checksum, offset;
- unsigned long align;
enum nd_pfn_mode mode;
struct nd_namespace_io *nsio;
+ unsigned long align, start_pad;
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
struct nd_namespace_common *ndns = nd_pfn->ndns;
const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev);
@@ -410,6 +410,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
align = le32_to_cpu(pfn_sb->align);
offset = le64_to_cpu(pfn_sb->dataoff);
+ start_pad = le32_to_cpu(pfn_sb->start_pad);
if (align == 0)
align = 1UL << ilog2(offset);
mode = le32_to_cpu(pfn_sb->mode);
@@ -468,7 +469,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
return -EBUSY;
}
- if ((align && !IS_ALIGNED(offset, align))
+ if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align))
|| !IS_ALIGNED(offset, PAGE_SIZE)) {
dev_err(&nd_pfn->dev,
"bad offset: %#llx dax disabled align: %#lx\n",
@@ -582,6 +583,12 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
return altmap;
}
+static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
+{
+ return min_t(u64, PHYS_SECTION_ALIGN_DOWN(phys),
+ ALIGN_DOWN(phys, nd_pfn->align));
+}
+
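
[Editor's note] phys_pmem_align_down() truncates to whichever boundary is stricter: the memory-section size (assumed 128 MiB here, as on x86_64) or the namespace alignment. A worked instance, with the section size as a stated assumption:

#include <stdio.h>

#define SZ_128M	(128ull << 20)	/* assumed SECTION size (x86_64) */
#define SZ_1G	(1ull << 30)

static unsigned long long align_down(unsigned long long x,
				     unsigned long long a)
{
	return x & ~(a - 1);	/* a is a power of two */
}

int main(void)
{
	unsigned long long end = 0x48000000ull + 0x123456;	/* ragged end */
	unsigned long long sec = align_down(end, SZ_128M);
	unsigned long long aln = align_down(end, SZ_1G);
	unsigned long long v = sec < aln ? sec : aln;

	printf("section 0x%llx, align 0x%llx -> 0x%llx\n", sec, aln, v);
	return 0;
}
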
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
@@ -637,13 +644,16 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
start = nsio->res.start;
size = PHYS_SECTION_ALIGN_UP(start + size) - start;
if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
- IORES_DESC_NONE) == REGION_MIXED) {
+ IORES_DESC_NONE) == REGION_MIXED
+ || !IS_ALIGNED(start + resource_size(&nsio->res),
+ nd_pfn->align)) {
size = resource_size(&nsio->res);
- end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
+ end_trunc = start + size - phys_pmem_align_down(nd_pfn,
+ start + size);
}
if (start_pad + end_trunc)
- dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
+ dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
dev_name(&ndns->dev), start_pad + end_trunc);
/*
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index a25fd43..441e67e 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -1,4 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -I$(src)
+
obj-$(CONFIG_NVME_CORE) += nvme-core.o
obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
@@ -6,6 +9,7 @@ obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
obj-$(CONFIG_NVME_FC) += nvme-fc.o
nvme-core-y := core.o
+nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_NVM) += lightnvm.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f837d66..e810487 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -29,6 +29,9 @@
#include <linux/pm_qos.h>
#include <asm/unaligned.h>
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
#include "nvme.h"
#include "fabrics.h"
@@ -65,9 +68,26 @@ static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
+/*
+ * nvme_wq - hosts nvme related works that are not reset or delete
+ * nvme_reset_wq - hosts nvme reset works
+ * nvme_delete_wq - hosts nvme delete works
+ *
+ * nvme_wq will host works such as scan, aen handling, fw activation,
+ * keep-alive error recovery, periodic reconnects, etc. nvme_reset_wq
+ * runs reset works, which also flush works hosted on nvme_wq for
+ * serialization purposes. nvme_delete_wq hosts controller deletion
+ * works, which flush reset works for serialization.
+ */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);
+struct workqueue_struct *nvme_reset_wq;
+EXPORT_SYMBOL_GPL(nvme_reset_wq);
+
+struct workqueue_struct *nvme_delete_wq;
+EXPORT_SYMBOL_GPL(nvme_delete_wq);
+
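
[Editor's note] The split exists because a work item must never flush the workqueue it runs on: reset work flushes items hosted on nvme_wq, and delete work in turn flushes reset work, so each level of flusher gets its own queue. A sketch of the rule with hypothetical handlers (the foo_* names are invented):

/* Sketch only — the foo_*_work() handlers are hypothetical. */
static void foo_reset_work(struct work_struct *work)
{
	/* Safe: we run on nvme_reset_wq, not on the queue we flush. */
	flush_workqueue(nvme_wq);
}

static void foo_delete_work(struct work_struct *work)
{
	/* Same rule, one level up the hierarchy. */
	flush_workqueue(nvme_reset_wq);
}
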
static DEFINE_IDA(nvme_subsystems_ida);
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);
@@ -89,13 +109,13 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
return -EBUSY;
- if (!queue_work(nvme_wq, &ctrl->reset_work))
+ if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
return -EBUSY;
return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
-static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
int ret;
@@ -104,6 +124,7 @@ static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
flush_work(&ctrl->reset_work);
return ret;
}
+EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
static void nvme_delete_ctrl_work(struct work_struct *work)
{
@@ -122,7 +143,7 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
return -EBUSY;
- if (!queue_work(nvme_wq, &ctrl->delete_work))
+ if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
return -EBUSY;
return 0;
}
@@ -157,13 +178,20 @@ static blk_status_t nvme_error_status(struct request *req)
return BLK_STS_OK;
case NVME_SC_CAP_EXCEEDED:
return BLK_STS_NOSPC;
+ case NVME_SC_LBA_RANGE:
+ return BLK_STS_TARGET;
+ case NVME_SC_BAD_ATTRIBUTES:
case NVME_SC_ONCS_NOT_SUPPORTED:
+ case NVME_SC_INVALID_OPCODE:
+ case NVME_SC_INVALID_FIELD:
+ case NVME_SC_INVALID_NS:
return BLK_STS_NOTSUPP;
case NVME_SC_WRITE_FAULT:
case NVME_SC_READ_ERROR:
case NVME_SC_UNWRITTEN_BLOCK:
case NVME_SC_ACCESS_DENIED:
case NVME_SC_READ_ONLY:
+ case NVME_SC_COMPARE_FAILED:
return BLK_STS_MEDIUM;
case NVME_SC_GUARD_CHECK:
case NVME_SC_APPTAG_CHECK:
@@ -190,8 +218,12 @@ static inline bool nvme_req_needs_retry(struct request *req)
void nvme_complete_rq(struct request *req)
{
- if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) {
- if (nvme_req_needs_failover(req)) {
+ blk_status_t status = nvme_error_status(req);
+
+ trace_nvme_complete_rq(req);
+
+ if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
+ if (nvme_req_needs_failover(req, status)) {
nvme_failover_req(req);
return;
}
@@ -202,8 +234,7 @@ void nvme_complete_rq(struct request *req)
return;
}
}
-
- blk_mq_end_request(req, nvme_error_status(req));
+ blk_mq_end_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
@@ -232,6 +263,15 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
old_state = ctrl->state;
switch (new_state) {
+ case NVME_CTRL_ADMIN_ONLY:
+ switch (old_state) {
+ case NVME_CTRL_RECONNECTING:
+ changed = true;
+ /* FALLTHRU */
+ default:
+ break;
+ }
+ break;
case NVME_CTRL_LIVE:
switch (old_state) {
case NVME_CTRL_NEW:
@@ -247,6 +287,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (old_state) {
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
+ case NVME_CTRL_ADMIN_ONLY:
changed = true;
/* FALLTHRU */
default:
@@ -266,6 +307,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_DELETING:
switch (old_state) {
case NVME_CTRL_LIVE:
+ case NVME_CTRL_ADMIN_ONLY:
case NVME_CTRL_RESETTING:
case NVME_CTRL_RECONNECTING:
changed = true;
@@ -591,6 +633,10 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
}
cmd->common.command_id = req->tag;
+ if (ns)
+ trace_nvme_setup_nvm_cmd(req->q->id, cmd);
+ else
+ trace_nvme_setup_admin_cmd(cmd);
return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
@@ -1217,16 +1263,27 @@ static int nvme_open(struct block_device *bdev, fmode_t mode)
#ifdef CONFIG_NVME_MULTIPATH
/* should never be called due to GENHD_FL_HIDDEN */
if (WARN_ON_ONCE(ns->head->disk))
- return -ENXIO;
+ goto fail;
#endif
if (!kref_get_unless_zero(&ns->kref))
- return -ENXIO;
+ goto fail;
+ if (!try_module_get(ns->ctrl->ops->module))
+ goto fail_put_ns;
+
return 0;
+
+fail_put_ns:
+ nvme_put_ns(ns);
+fail:
+ return -ENXIO;
}
static void nvme_release(struct gendisk *disk, fmode_t mode)
{
- nvme_put_ns(disk->private_data);
+ struct nvme_ns *ns = disk->private_data;
+
+ module_put(ns->ctrl->ops->module);
+ nvme_put_ns(ns);
}
static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -1287,7 +1344,7 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl,
BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
NVME_DSM_MAX_RANGES);
- queue->limits.discard_alignment = size;
+ queue->limits.discard_alignment = 0;
queue->limits.discard_granularity = size;
blk_queue_max_discard_sectors(queue, UINT_MAX);
@@ -1335,6 +1392,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
struct nvme_ns *ns, struct nvme_id_ns *id)
{
sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
+ unsigned short bs = 1 << ns->lba_shift;
unsigned stream_alignment = 0;
if (ns->ctrl->nr_streams && ns->sws && ns->sgs)
@@ -1343,7 +1401,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
blk_mq_freeze_queue(disk->queue);
blk_integrity_unregister(disk);
- blk_queue_logical_block_size(disk->queue, 1 << ns->lba_shift);
+ blk_queue_logical_block_size(disk->queue, bs);
+ blk_queue_physical_block_size(disk->queue, bs);
+ blk_queue_io_min(disk->queue, bs);
+
if (ns->ms && !ns->ext &&
(ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
nvme_init_integrity(disk, ns->ms, ns->pi_type);
@@ -1705,7 +1766,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
}
- if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
+ if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
+ is_power_of_2(ctrl->max_hw_sectors))
blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
blk_queue_virt_boundary(q, ctrl->page_size - 1);
if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
@@ -2047,6 +2109,22 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
NULL,
};
+static int nvme_active_ctrls(struct nvme_subsystem *subsys)
+{
+ int count = 0;
+ struct nvme_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (ctrl->state != NVME_CTRL_DELETING &&
+ ctrl->state != NVME_CTRL_DEAD)
+ count++;
+ }
+ mutex_unlock(&subsys->lock);
+
+ return count;
+}
+
static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
struct nvme_subsystem *subsys, *found;
@@ -2085,7 +2163,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
* Verify that the subsystem actually supports multiple
* controllers, else bail out.
*/
- if (!(id->cmic & (1 << 1))) {
+ if (nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
dev_err(ctrl->device,
"ignoring ctrl due to duplicate subnqn (%s).\n",
found->subnqn);
@@ -2252,7 +2330,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
shutdown_timeout, 60);
if (ctrl->shutdown_timeout != shutdown_timeout)
- dev_warn(ctrl->device,
+ dev_info(ctrl->device,
"Shutdown timeout set to %u seconds\n",
ctrl->shutdown_timeout);
} else
@@ -2336,8 +2414,14 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
struct nvme_ctrl *ctrl =
container_of(inode->i_cdev, struct nvme_ctrl, cdev);
- if (ctrl->state != NVME_CTRL_LIVE)
+ switch (ctrl->state) {
+ case NVME_CTRL_LIVE:
+ case NVME_CTRL_ADMIN_ONLY:
+ break;
+ default:
return -EWOULDBLOCK;
+ }
+
file->private_data = ctrl;
return 0;
}
@@ -2601,6 +2685,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
static const char *const state_name[] = {
[NVME_CTRL_NEW] = "new",
[NVME_CTRL_LIVE] = "live",
+ [NVME_CTRL_ADMIN_ONLY] = "only-admin",
[NVME_CTRL_RESETTING] = "resetting",
[NVME_CTRL_RECONNECTING]= "reconnecting",
[NVME_CTRL_DELETING] = "deleting",
@@ -2869,7 +2954,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
nvme_set_queue_limits(ctrl, ns->queue);
- nvme_setup_streams_ns(ctrl, ns);
id = nvme_identify_ns(ctrl, nsid);
if (!id)
@@ -2880,6 +2964,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (nvme_init_ns_head(ns, nsid, id, &new))
goto out_free_id;
+ nvme_setup_streams_ns(ctrl, ns);
#ifdef CONFIG_NVME_MULTIPATH
/*
@@ -2965,8 +3050,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
return;
if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
- if (blk_get_integrity(ns->disk))
- blk_integrity_unregister(ns->disk);
nvme_mpath_remove_disk_links(ns);
sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
&nvme_ns_id_attr_group);
@@ -2974,6 +3057,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
nvme_nvm_unregister_sysfs(ns);
del_gendisk(ns->disk);
blk_cleanup_queue(ns->queue);
+ if (blk_get_integrity(ns->disk))
+ blk_integrity_unregister(ns->disk);
}
mutex_lock(&ns->ctrl->subsys->lock);
@@ -2986,6 +3071,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
mutex_unlock(&ns->ctrl->namespaces_mutex);
synchronize_srcu(&ns->head->srcu);
+ nvme_mpath_check_last_path(ns);
nvme_put_ns(ns);
}
@@ -3073,6 +3159,8 @@ static void nvme_scan_work(struct work_struct *work)
if (ctrl->state != NVME_CTRL_LIVE)
return;
+ WARN_ON_ONCE(!ctrl->tagset);
+
if (nvme_identify_ctrl(ctrl, &id))
return;
@@ -3093,8 +3181,7 @@ static void nvme_scan_work(struct work_struct *work)
void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
/*
- * Do not queue new scan work when a controller is reset during
- * removal.
+ * Only queue new scan work when admin and IO queues are both alive.
*/
if (ctrl->state == NVME_CTRL_LIVE)
queue_work(nvme_wq, &ctrl->scan_work);
@@ -3471,16 +3558,26 @@ EXPORT_SYMBOL_GPL(nvme_reinit_tagset);
int __init nvme_core_init(void)
{
- int result;
+ int result = -ENOMEM;
nvme_wq = alloc_workqueue("nvme-wq",
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
if (!nvme_wq)
- return -ENOMEM;
+ goto out;
+
+ nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!nvme_reset_wq)
+ goto destroy_wq;
+
+ nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!nvme_delete_wq)
+ goto destroy_reset_wq;
result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
if (result < 0)
- goto destroy_wq;
+ goto destroy_delete_wq;
nvme_class = class_create(THIS_MODULE, "nvme");
if (IS_ERR(nvme_class)) {
@@ -3499,8 +3596,13 @@ destroy_class:
class_destroy(nvme_class);
unregister_chrdev:
unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
+destroy_delete_wq:
+ destroy_workqueue(nvme_delete_wq);
+destroy_reset_wq:
+ destroy_workqueue(nvme_reset_wq);
destroy_wq:
destroy_workqueue(nvme_wq);
+out:
return result;
}
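
The reworked nvme_core_init() follows the usual kernel goto-ladder: each allocation failure jumps to a label that releases everything set up before it, in reverse order, so nvme_reset_wq and nvme_delete_wq unwind cleanly. The same shape in a standalone sketch, with malloc/free standing in for alloc_workqueue/destroy_workqueue:

#include <stdlib.h>

/* Goto-ladder unwinding: each failure frees what was set up before it,
 * in reverse order; on success everything stays allocated, as in
 * module init. */
static int demo_init(void)
{
    int result = -1; /* stands in for -ENOMEM */
    void *wq, *reset_wq, *delete_wq;

    wq = malloc(64);
    if (!wq)
        goto out;

    reset_wq = malloc(64);
    if (!reset_wq)
        goto destroy_wq;

    delete_wq = malloc(64);
    if (!delete_wq)
        goto destroy_reset_wq;

    return 0;

destroy_reset_wq:
    free(reset_wq);
destroy_wq:
    free(wq);
out:
    return result;
}
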
@@ -3510,6 +3612,8 @@ void nvme_core_exit(void)
class_destroy(nvme_subsys_class);
class_destroy(nvme_class);
unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
+ destroy_workqueue(nvme_delete_wq);
+ destroy_workqueue(nvme_reset_wq);
destroy_workqueue(nvme_wq);
}
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 76b4fe6..5dd4cee 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -74,6 +74,7 @@ static struct nvmf_host *nvmf_host_default(void)
return NULL;
kref_init(&host->ref);
+ uuid_gen(&host->id);
snprintf(host->nqn, NVMF_NQN_SIZE,
"nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);
@@ -492,7 +493,7 @@ EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
*/
int nvmf_register_transport(struct nvmf_transport_ops *ops)
{
- if (!ops->create_ctrl)
+ if (!ops->create_ctrl || !ops->module)
return -EINVAL;
down_write(&nvmf_transports_rwsem);
@@ -738,11 +739,14 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -ENOMEM;
goto out;
}
- if (uuid_parse(p, &hostid)) {
+ ret = uuid_parse(p, &hostid);
+ if (ret) {
pr_err("Invalid hostid %s\n", p);
ret = -EINVAL;
+ kfree(p);
goto out;
}
+ kfree(p);
break;
case NVMF_OPT_DUP_CONNECT:
opts->duplicate_connect = true;
@@ -868,32 +872,41 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
goto out_unlock;
}
+ if (!try_module_get(ops->module)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
ret = nvmf_check_required_opts(opts, ops->required_opts);
if (ret)
- goto out_unlock;
+ goto out_module_put;
ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
ops->allowed_opts | ops->required_opts);
if (ret)
- goto out_unlock;
+ goto out_module_put;
ctrl = ops->create_ctrl(dev, opts);
if (IS_ERR(ctrl)) {
ret = PTR_ERR(ctrl);
- goto out_unlock;
+ goto out_module_put;
}
if (strcmp(ctrl->subsys->subnqn, opts->subsysnqn)) {
dev_warn(ctrl->device,
"controller returned incorrect NQN: \"%s\".\n",
ctrl->subsys->subnqn);
+ module_put(ops->module);
up_read(&nvmf_transports_rwsem);
nvme_delete_ctrl_sync(ctrl);
return ERR_PTR(-EINVAL);
}
+ module_put(ops->module);
up_read(&nvmf_transports_rwsem);
return ctrl;
+out_module_put:
+ module_put(ops->module);
out_unlock:
up_read(&nvmf_transports_rwsem);
out_free_opts:
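
nvmf_create_ctrl() now brackets the whole creation path with try_module_get()/module_put() on the transport module, so the transport cannot be unloaded while ->create_ctrl() is running; every exit path, success or failure, drops the reference. The bracket pattern in isolation (all demo_* names are illustrative):

#include <stdbool.h>

struct demo_transport {
    int mod_refs;
    int (*create_ctrl)(struct demo_transport *ops);
};

static bool demo_module_get(struct demo_transport *t) { t->mod_refs++; return true; }
static void demo_module_put(struct demo_transport *t) { t->mod_refs--; }
static int  demo_check_opts(struct demo_transport *t) { (void)t; return 0; }

/* Pin the transport across ->create_ctrl(); drop the pin on every exit. */
static int demo_create(struct demo_transport *ops)
{
    int ret;

    if (!demo_module_get(ops))
        return -1; /* stands in for -EBUSY */

    ret = demo_check_opts(ops);
    if (ret)
        goto out_put;

    ret = ops->create_ctrl(ops); /* module stays pinned while this runs */

out_put:
    demo_module_put(ops); /* matched on success and on every failure */
    return ret;
}
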
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 9ba6149..25b19f7 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -108,6 +108,7 @@ struct nvmf_ctrl_options {
* fabric implementation of NVMe fabrics.
* @entry: Used by the fabrics library to add the new
* registration entry to its linked-list internal tree.
+ * @module: Transport module reference
* @name: Name of the NVMe fabric driver implementation.
* @required_opts: sysfs command-line options that must be specified
* when adding a new NVMe controller.
@@ -126,6 +127,7 @@ struct nvmf_ctrl_options {
*/
struct nvmf_transport_ops {
struct list_head entry;
+ struct module *module;
const char *name;
int required_opts;
int allowed_opts;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 0a8af4d..99bf51c 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2921,6 +2921,9 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
nvme_fc_free_queue(&ctrl->queues[0]);
+ /* re-enable the admin_q so anything new can fail fast */
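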
+ blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+
nvme_fc_ctlr_inactive_on_rport(ctrl);
}
@@ -2935,6 +2938,9 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
* waiting for io to terminate
*/
nvme_fc_delete_association(ctrl);
+
+ /* resume the io queues so that things will fail fast */
+ nvme_start_queues(nctrl);
}
static void
@@ -3221,7 +3227,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
/* initiate nvme ctrl ref counting teardown */
nvme_uninit_ctrl(&ctrl->ctrl);
- nvme_put_ctrl(&ctrl->ctrl);
/* Remove core ctrl ref. */
nvme_put_ctrl(&ctrl->ctrl);
@@ -3381,6 +3386,7 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
static struct nvmf_transport_ops nvme_fc_transport = {
.name = "fc",
+ .module = THIS_MODULE,
.required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
.allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
.create_ctrl = nvme_fc_create_ctrl,
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index ba3d7f3..50ef71ee 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -31,27 +31,10 @@
enum nvme_nvm_admin_opcode {
nvme_nvm_admin_identity = 0xe2,
- nvme_nvm_admin_get_l2p_tbl = 0xea,
nvme_nvm_admin_get_bb_tbl = 0xf2,
nvme_nvm_admin_set_bb_tbl = 0xf1,
};
-struct nvme_nvm_hb_rw {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __u64 rsvd2;
- __le64 metadata;
- __le64 prp1;
- __le64 prp2;
- __le64 spba;
- __le16 length;
- __le16 control;
- __le32 dsmgmt;
- __le64 slba;
-};
-
struct nvme_nvm_ph_rw {
__u8 opcode;
__u8 flags;
@@ -80,19 +63,6 @@ struct nvme_nvm_identity {
__u32 rsvd11[5];
};
-struct nvme_nvm_l2ptbl {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __le32 cdw2[4];
- __le64 prp1;
- __le64 prp2;
- __le64 slba;
- __le32 nlb;
- __le16 cdw14[6];
-};
-
struct nvme_nvm_getbbtbl {
__u8 opcode;
__u8 flags;
@@ -139,9 +109,7 @@ struct nvme_nvm_command {
union {
struct nvme_common_command common;
struct nvme_nvm_identity identity;
- struct nvme_nvm_hb_rw hb_rw;
struct nvme_nvm_ph_rw ph_rw;
- struct nvme_nvm_l2ptbl l2p;
struct nvme_nvm_getbbtbl get_bb;
struct nvme_nvm_setbbtbl set_bb;
struct nvme_nvm_erase_blk erase;
@@ -167,7 +135,7 @@ struct nvme_nvm_id_group {
__u8 num_lun;
__u8 num_pln;
__u8 rsvd1;
- __le16 num_blk;
+ __le16 num_chk;
__le16 num_pg;
__le16 fpg_sz;
__le16 csecs;
@@ -234,11 +202,9 @@ struct nvme_nvm_bb_tbl {
static inline void _nvme_nvm_check_size(void)
{
BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
@@ -249,51 +215,58 @@ static inline void _nvme_nvm_check_size(void)
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
struct nvme_nvm_id_group *src;
- struct nvm_id_group *dst;
+ struct nvm_id_group *grp;
+ int sec_per_pg, sec_per_pl, pg_per_blk;
if (nvme_nvm_id->cgrps != 1)
return -EINVAL;
src = &nvme_nvm_id->groups[0];
- dst = &nvm_id->grp;
-
- dst->mtype = src->mtype;
- dst->fmtype = src->fmtype;
- dst->num_ch = src->num_ch;
- dst->num_lun = src->num_lun;
- dst->num_pln = src->num_pln;
-
- dst->num_pg = le16_to_cpu(src->num_pg);
- dst->num_blk = le16_to_cpu(src->num_blk);
- dst->fpg_sz = le16_to_cpu(src->fpg_sz);
- dst->csecs = le16_to_cpu(src->csecs);
- dst->sos = le16_to_cpu(src->sos);
-
- dst->trdt = le32_to_cpu(src->trdt);
- dst->trdm = le32_to_cpu(src->trdm);
- dst->tprt = le32_to_cpu(src->tprt);
- dst->tprm = le32_to_cpu(src->tprm);
- dst->tbet = le32_to_cpu(src->tbet);
- dst->tbem = le32_to_cpu(src->tbem);
- dst->mpos = le32_to_cpu(src->mpos);
- dst->mccap = le32_to_cpu(src->mccap);
-
- dst->cpar = le16_to_cpu(src->cpar);
-
- if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
- memcpy(dst->lptbl.id, src->lptbl.id, 8);
- dst->lptbl.mlc.num_pairs =
- le16_to_cpu(src->lptbl.mlc.num_pairs);
-
- if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
- pr_err("nvm: number of MLC pairs not supported\n");
- return -EINVAL;
- }
+ grp = &nvm_id->grp;
+
+ grp->mtype = src->mtype;
+ grp->fmtype = src->fmtype;
+
+ grp->num_ch = src->num_ch;
+ grp->num_lun = src->num_lun;
+
+ grp->num_chk = le16_to_cpu(src->num_chk);
+ grp->csecs = le16_to_cpu(src->csecs);
+ grp->sos = le16_to_cpu(src->sos);
+
+ pg_per_blk = le16_to_cpu(src->num_pg);
+ sec_per_pg = le16_to_cpu(src->fpg_sz) / grp->csecs;
+ sec_per_pl = sec_per_pg * src->num_pln;
+ grp->clba = sec_per_pl * pg_per_blk;
+ grp->ws_per_chk = pg_per_blk;
- memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
- dst->lptbl.mlc.num_pairs);
+ grp->mpos = le32_to_cpu(src->mpos);
+ grp->cpar = le16_to_cpu(src->cpar);
+ grp->mccap = le32_to_cpu(src->mccap);
+
+ grp->ws_opt = grp->ws_min = sec_per_pg;
+ grp->ws_seq = NVM_IO_SNGL_ACCESS;
+
+ if (grp->mpos & 0x020202) {
+ grp->ws_seq = NVM_IO_DUAL_ACCESS;
+ grp->ws_opt <<= 1;
+ } else if (grp->mpos & 0x040404) {
+ grp->ws_seq = NVM_IO_QUAD_ACCESS;
+ grp->ws_opt <<= 2;
}
+ grp->trdt = le32_to_cpu(src->trdt);
+ grp->trdm = le32_to_cpu(src->trdm);
+ grp->tprt = le32_to_cpu(src->tprt);
+ grp->tprm = le32_to_cpu(src->tprm);
+ grp->tbet = le32_to_cpu(src->tbet);
+ grp->tbem = le32_to_cpu(src->tbem);
+
+ /* 1.2 compatibility */
+ grp->num_pln = src->num_pln;
+ grp->num_pg = le16_to_cpu(src->num_pg);
+ grp->fpg_sz = le16_to_cpu(src->fpg_sz);
+
return 0;
}
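
The rewritten init_grps() derives 2.0-style geometry from the 1.2 identify fields: sectors per page is fpg_sz / csecs, sectors per plane-line multiplies that by num_pln, and the chunk capacity clba multiplies again by the pages per block. A worked example with made-up but plausible 1.2 values:

#include <stdio.h>

int main(void)
{
    /* Made-up but plausible OCSSD 1.2 identify values. */
    int fpg_sz  = 16384; /* flash page size, bytes */
    int csecs   = 4096;  /* sector size, bytes */
    int num_pln = 4;     /* planes */
    int num_pg  = 512;   /* pages per block */

    int sec_per_pg = fpg_sz / csecs;       /* 4 sectors per page */
    int sec_per_pl = sec_per_pg * num_pln; /* 16 sectors per plane-line */
    int clba = sec_per_pl * num_pg;        /* 8192 sectors per chunk */

    printf("clba=%d ws_min=%d ws_per_chk=%d\n", clba, sec_per_pg, num_pg);
    return 0;
}
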
@@ -332,62 +305,6 @@ out:
return ret;
}
-static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
- nvm_l2p_update_fn *update_l2p, void *priv)
-{
- struct nvme_ns *ns = nvmdev->q->queuedata;
- struct nvme_nvm_command c = {};
- u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
- u32 nlb_pr_rq = len / sizeof(u64);
- u64 cmd_slba = slba;
- void *entries;
- int ret = 0;
-
- c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
- c.l2p.nsid = cpu_to_le32(ns->head->ns_id);
- entries = kmalloc(len, GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
-
- while (nlb) {
- u32 cmd_nlb = min(nlb_pr_rq, nlb);
- u64 elba = slba + cmd_nlb;
-
- c.l2p.slba = cpu_to_le64(cmd_slba);
- c.l2p.nlb = cpu_to_le32(cmd_nlb);
-
- ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
- (struct nvme_command *)&c, entries, len);
- if (ret) {
- dev_err(ns->ctrl->device,
- "L2P table transfer failed (%d)\n", ret);
- ret = -EIO;
- goto out;
- }
-
- if (unlikely(elba > nvmdev->total_secs)) {
- pr_err("nvm: L2P data from device is out of bounds!\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* Transform physical address to target address space */
- nvm_part_to_tgt(nvmdev, entries, cmd_nlb);
-
- if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
- ret = -EINTR;
- goto out;
- }
-
- cmd_slba += cmd_nlb;
- nlb -= cmd_nlb;
- }
-
-out:
- kfree(entries);
- return ret;
-}
-
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
u8 *blks)
{
@@ -397,7 +314,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_nvm_command c = {};
struct nvme_nvm_bb_tbl *bb_tbl;
- int nr_blks = geo->blks_per_lun * geo->plane_mode;
+ int nr_blks = geo->nr_chks * geo->plane_mode;
int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
int ret = 0;
@@ -438,7 +355,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
goto out;
}
- memcpy(blks, bb_tbl->blk, geo->blks_per_lun * geo->plane_mode);
+ memcpy(blks, bb_tbl->blk, geo->nr_chks * geo->plane_mode);
out:
kfree(bb_tbl);
return ret;
@@ -474,10 +391,6 @@ static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
c->ph_rw.control = cpu_to_le16(rqd->flags);
c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
-
- if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
- c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
- rqd->bio->bi_iter.bi_sector));
}
static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
@@ -597,8 +510,6 @@ static void nvme_nvm_dev_dma_free(void *pool, void *addr,
static struct nvm_dev_ops nvme_nvm_dev_ops = {
.identity = nvme_nvm_identity,
- .get_l2p_tbl = nvme_nvm_get_l2p_tbl,
-
.get_bb_tbl = nvme_nvm_get_bb_tbl,
.set_bb_tbl = nvme_nvm_set_bb_tbl,
@@ -883,7 +794,7 @@ static ssize_t nvm_dev_attr_show(struct device *dev,
} else if (strcmp(attr->name, "num_planes") == 0) {
return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
} else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_chk);
} else if (strcmp(attr->name, "num_pages") == 0) {
return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
} else if (strcmp(attr->name, "page_size") == 0) {
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 1218a9f..3b211d9 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -33,51 +33,11 @@ void nvme_failover_req(struct request *req)
kblockd_schedule_work(&ns->head->requeue_work);
}
-bool nvme_req_needs_failover(struct request *req)
+bool nvme_req_needs_failover(struct request *req, blk_status_t error)
{
if (!(req->cmd_flags & REQ_NVME_MPATH))
return false;
-
- switch (nvme_req(req)->status & 0x7ff) {
- /*
- * Generic command status:
- */
- case NVME_SC_INVALID_OPCODE:
- case NVME_SC_INVALID_FIELD:
- case NVME_SC_INVALID_NS:
- case NVME_SC_LBA_RANGE:
- case NVME_SC_CAP_EXCEEDED:
- case NVME_SC_RESERVATION_CONFLICT:
- return false;
-
- /*
- * I/O command set specific error. Unfortunately these values are
- * reused for fabrics commands, but those should never get here.
- */
- case NVME_SC_BAD_ATTRIBUTES:
- case NVME_SC_INVALID_PI:
- case NVME_SC_READ_ONLY:
- case NVME_SC_ONCS_NOT_SUPPORTED:
- WARN_ON_ONCE(nvme_req(req)->cmd->common.opcode ==
- nvme_fabrics_command);
- return false;
-
- /*
- * Media and Data Integrity Errors:
- */
- case NVME_SC_WRITE_FAULT:
- case NVME_SC_READ_ERROR:
- case NVME_SC_GUARD_CHECK:
- case NVME_SC_APPTAG_CHECK:
- case NVME_SC_REFTAG_CHECK:
- case NVME_SC_COMPARE_FAILED:
- case NVME_SC_ACCESS_DENIED:
- case NVME_SC_UNWRITTEN_BLOCK:
- return false;
- }
-
- /* Everything else could be a path failure, so should be retried */
- return true;
+ return blk_path_error(error);
}
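
With this change the per-status-code policy moves into the block layer's blk_path_error(), shared with dm-multipath: only errors another path could plausibly satisfy (transport-style failures) trigger failover. A standalone sketch of that split; demo_path_error() is an illustrative stand-in for the real helper:

#include <stdbool.h>

/* Coarse error classes, purely illustrative. */
enum demo_status { DEMO_OK, DEMO_TRANSPORT, DEMO_TARGET, DEMO_MEDIUM };

static bool demo_path_error(enum demo_status error)
{
    switch (error) {
    case DEMO_TRANSPORT:
        return true;  /* another path may well succeed */
    default:
        return false; /* target/medium errors follow the request */
    }
}

static bool demo_needs_failover(bool is_mpath_req, enum demo_status error)
{
    if (!is_mpath_req)
        return false; /* request is not under multipath control */
    return demo_path_error(error);
}
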
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ea1aa52..8e4550f 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -32,6 +32,8 @@ extern unsigned int admin_timeout;
#define NVME_KATO_GRACE 10
extern struct workqueue_struct *nvme_wq;
+extern struct workqueue_struct *nvme_reset_wq;
+extern struct workqueue_struct *nvme_delete_wq;
enum {
NVME_NS_LBA = 0,
@@ -119,6 +121,7 @@ static inline struct nvme_request *nvme_req(struct request *req)
enum nvme_ctrl_state {
NVME_CTRL_NEW,
NVME_CTRL_LIVE,
+ NVME_CTRL_ADMIN_ONLY, /* Only admin queue live */
NVME_CTRL_RESETTING,
NVME_CTRL_RECONNECTING,
NVME_CTRL_DELETING,
@@ -393,6 +396,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
+int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
@@ -401,7 +405,7 @@ extern const struct block_device_operations nvme_ns_head_ops;
#ifdef CONFIG_NVME_MULTIPATH
void nvme_failover_req(struct request *req);
-bool nvme_req_needs_failover(struct request *req);
+bool nvme_req_needs_failover(struct request *req, blk_status_t error);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns_head *head);
@@ -417,11 +421,21 @@ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
rcu_assign_pointer(head->current_path, NULL);
}
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
+
+static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+{
+ struct nvme_ns_head *head = ns->head;
+
+ if (head->disk && list_empty(&head->list))
+ kblockd_schedule_work(&head->requeue_work);
+}
+
#else
static inline void nvme_failover_req(struct request *req)
{
}
-static inline bool nvme_req_needs_failover(struct request *req)
+static inline bool nvme_req_needs_failover(struct request *req,
+ blk_status_t error)
{
return false;
}
@@ -448,6 +462,9 @@ static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
}
+static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+{
+}
#endif /* CONFIG_NVME_MULTIPATH */
#ifdef CONFIG_NVM
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index f5800c3c..6fe7af0 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -75,7 +75,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
* Represents an NVM Express device. Each nvme_dev is a PCI function.
*/
struct nvme_dev {
- struct nvme_queue **queues;
+ struct nvme_queue *queues;
struct blk_mq_tag_set tagset;
struct blk_mq_tag_set admin_tagset;
u32 __iomem *dbs;
@@ -365,7 +365,7 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
struct nvme_dev *dev = data;
- struct nvme_queue *nvmeq = dev->queues[0];
+ struct nvme_queue *nvmeq = &dev->queues[0];
WARN_ON(hctx_idx != 0);
WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
@@ -387,7 +387,7 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
struct nvme_dev *dev = data;
- struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
+ struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
if (!nvmeq->tags)
nvmeq->tags = &dev->tagset.tags[hctx_idx];
@@ -403,7 +403,7 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
struct nvme_dev *dev = set->driver_data;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
- struct nvme_queue *nvmeq = dev->queues[queue_idx];
+ struct nvme_queue *nvmeq = &dev->queues[queue_idx];
BUG_ON(!nvmeq);
iod->nvmeq = nvmeq;
@@ -448,12 +448,34 @@ static void **nvme_pci_iod_list(struct request *req)
return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}
+static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ int nseg = blk_rq_nr_phys_segments(req);
+ unsigned int avg_seg_size;
+
+ if (nseg == 0)
+ return false;
+
+ avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
+
+ if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
+ return false;
+ if (!iod->nvmeq->qid)
+ return false;
+ if (!sgl_threshold || avg_seg_size < sgl_threshold)
+ return false;
+ return true;
+}
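
nvme_pci_use_sgls() decides per request: SGLs are used only when the controller advertises SGL support, the request is on an I/O queue, and the average segment size (payload divided by segment count, rounded up) reaches the sgl_threshold module parameter. The arithmetic restated as a standalone function:

#include <stdbool.h>

/* SGL vs PRP: require controller support, an I/O queue, and an average
 * segment size at or above the threshold. */
static bool demo_use_sgls(bool ctrl_has_sgls, int qid, unsigned threshold,
                          unsigned payload_bytes, unsigned nseg)
{
    unsigned avg_seg_size;

    if (nseg == 0)
        return false;
    avg_seg_size = (payload_bytes + nseg - 1) / nseg; /* DIV_ROUND_UP */

    if (!ctrl_has_sgls)
        return false;
    if (qid == 0) /* never on the admin queue */
        return false;
    if (!threshold || avg_seg_size < threshold)
        return false;
    return true;
}
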
+
static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
int nseg = blk_rq_nr_phys_segments(rq);
unsigned int size = blk_rq_payload_bytes(rq);
+ iod->use_sgl = nvme_pci_use_sgls(dev, rq);
+
if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
iod->use_sgl);
@@ -604,8 +626,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
dma_addr_t prp_dma;
int nprps, i;
- iod->use_sgl = false;
-
length -= (page_size - offset);
if (length <= 0) {
iod->first_dma = 0;
@@ -705,22 +725,19 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
}
static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
- struct request *req, struct nvme_rw_command *cmd)
+ struct request *req, struct nvme_rw_command *cmd, int entries)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- int length = blk_rq_payload_bytes(req);
struct dma_pool *pool;
struct nvme_sgl_desc *sg_list;
struct scatterlist *sg = iod->sg;
- int entries = iod->nents, i = 0;
dma_addr_t sgl_dma;
-
- iod->use_sgl = true;
+ int i = 0;
/* setting the transfer type as SGL */
cmd->flags = NVME_CMD_SGL_METABUF;
- if (length == sg_dma_len(sg)) {
+ if (entries == 1) {
nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
return BLK_STS_OK;
}
@@ -760,33 +777,12 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
}
nvme_pci_sgl_set_data(&sg_list[i++], sg);
-
- length -= sg_dma_len(sg);
sg = sg_next(sg);
- entries--;
- } while (length > 0);
+ } while (--entries > 0);
- WARN_ON(entries > 0);
return BLK_STS_OK;
}
-static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
-{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- unsigned int avg_seg_size;
-
- avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
- blk_rq_nr_phys_segments(req));
-
- if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
- return false;
- if (!iod->nvmeq->qid)
- return false;
- if (!sgl_threshold || avg_seg_size < sgl_threshold)
- return false;
- return true;
-}
-
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
struct nvme_command *cmnd)
{
@@ -795,6 +791,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
enum dma_data_direction dma_dir = rq_data_dir(req) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE;
blk_status_t ret = BLK_STS_IOERR;
+ int nr_mapped;
sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
iod->nents = blk_rq_map_sg(q, req, iod->sg);
@@ -802,12 +799,13 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
goto out;
ret = BLK_STS_RESOURCE;
- if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
- DMA_ATTR_NO_WARN))
+ nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
+ DMA_ATTR_NO_WARN);
+ if (!nr_mapped)
goto out;
- if (nvme_pci_use_sgls(dev, req))
- ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
+ if (iod->use_sgl)
+ ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
else
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
@@ -1046,7 +1044,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
struct nvme_dev *dev = to_nvme_dev(ctrl);
- struct nvme_queue *nvmeq = dev->queues[0];
+ struct nvme_queue *nvmeq = &dev->queues[0];
struct nvme_command c;
memset(&c, 0, sizeof(c));
@@ -1140,9 +1138,14 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
*/
bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
- /* If there is a reset ongoing, we shouldn't reset again. */
- if (dev->ctrl.state == NVME_CTRL_RESETTING)
+ /* If there is a reset/reinit ongoing, we shouldn't reset again. */
+ switch (dev->ctrl.state) {
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_RECONNECTING:
return false;
+ default:
+ break;
+ }
/* We shouldn't reset unless the controller is in a fatal error state
* _or_ if we lost communication with it.
@@ -1282,7 +1285,6 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
if (nvmeq->sq_cmds)
dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
nvmeq->sq_cmds, nvmeq->sq_dma_addr);
- kfree(nvmeq);
}
static void nvme_free_queues(struct nvme_dev *dev, int lowest)
@@ -1290,10 +1292,8 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
int i;
for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
- struct nvme_queue *nvmeq = dev->queues[i];
dev->ctrl.queue_count--;
- dev->queues[i] = NULL;
- nvme_free_queue(nvmeq);
+ nvme_free_queue(&dev->queues[i]);
}
}
@@ -1325,12 +1325,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
- struct nvme_queue *nvmeq = dev->queues[0];
-
- if (!nvmeq)
- return;
- if (nvme_suspend_queue(nvmeq))
- return;
+ struct nvme_queue *nvmeq = &dev->queues[0];
if (shutdown)
nvme_shutdown_ctrl(&dev->ctrl);
@@ -1369,7 +1364,7 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
int qid, int depth)
{
- if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
+ if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
dev->ctrl.page_size);
nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
@@ -1384,13 +1379,13 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
return 0;
}
-static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
- int depth, int node)
+static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
+ int depth, int node)
{
- struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL,
- node);
- if (!nvmeq)
- return NULL;
+ struct nvme_queue *nvmeq = &dev->queues[qid];
+
+ if (dev->ctrl.queue_count > qid)
+ return 0;
nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
&nvmeq->cq_dma_addr, GFP_KERNEL);
@@ -1409,17 +1404,15 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->q_depth = depth;
nvmeq->qid = qid;
nvmeq->cq_vector = -1;
- dev->queues[qid] = nvmeq;
dev->ctrl.queue_count++;
- return nvmeq;
+ return 0;
free_cqdma:
dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
nvmeq->cq_dma_addr);
free_nvmeq:
- kfree(nvmeq);
- return NULL;
+ return -ENOMEM;
}
static int queue_request_irq(struct nvme_queue *nvmeq)
@@ -1592,14 +1585,12 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
if (result < 0)
return result;
- nvmeq = dev->queues[0];
- if (!nvmeq) {
- nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
- dev_to_node(dev->dev));
- if (!nvmeq)
- return -ENOMEM;
- }
+ result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
+ dev_to_node(dev->dev));
+ if (result)
+ return result;
+ nvmeq = &dev->queues[0];
aqa = nvmeq->q_depth - 1;
aqa |= aqa << 16;
@@ -1629,7 +1620,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
/* vector == qid - 1, match nvme_create_queue */
- if (!nvme_alloc_queue(dev, i, dev->q_depth,
+ if (nvme_alloc_queue(dev, i, dev->q_depth,
pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
ret = -ENOMEM;
break;
@@ -1638,15 +1629,15 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
max = min(dev->max_qid, dev->ctrl.queue_count - 1);
for (i = dev->online_queues; i <= max; i++) {
- ret = nvme_create_queue(dev->queues[i], i);
+ ret = nvme_create_queue(&dev->queues[i], i);
if (ret)
break;
}
/*
* Ignore failing Create SQ/CQ commands, we can continue with less
- * than the desired aount of queues, and even a controller without
- * I/O queues an still be used to issue admin commands. This might
+ * than the desired number of queues, and even a controller without
+ * I/O queues can still be used to issue admin commands. This might
* be useful to upgrade a buggy firmware for example.
*/
return ret >= 0 ? 0 : ret;
@@ -1663,30 +1654,40 @@ static ssize_t nvme_cmb_show(struct device *dev,
}
static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
-static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
+static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
+{
+ u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
+
+ return 1ULL << (12 + 4 * szu);
+}
+
+static u32 nvme_cmb_size(struct nvme_dev *dev)
{
- u64 szu, size, offset;
+ return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
+}
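
The two new helpers decode the CMBSZ register: SZU selects a power-of-16 unit starting at 4 KiB, i.e. 1 << (12 + 4 * szu), and the total CMB size is that unit times the SZ field. Per the NVMe register layout, SZU sits at bits 11:8 and SZ at bits 31:12; the sample register value below is made up:

#include <stdint.h>
#include <stdio.h>

/* CMB size unit: 4 KiB * 16^SZU == 1 << (12 + 4 * szu). */
static uint64_t demo_cmb_size_unit(uint32_t cmbsz)
{
    uint8_t szu = (cmbsz >> 8) & 0xf; /* SZU, bits 11:8 */

    return 1ULL << (12 + 4 * szu);
}

static uint32_t demo_cmb_size(uint32_t cmbsz)
{
    return (cmbsz >> 12) & 0xfffff; /* SZ, bits 31:12 */
}

int main(void)
{
    uint32_t cmbsz = (2u << 8) | (8u << 12); /* SZU=2, SZ=8: made-up value */

    /* unit = 1 MiB, total = 8 MiB */
    printf("unit=%llu size=%llu\n",
           (unsigned long long)demo_cmb_size_unit(cmbsz),
           (unsigned long long)(demo_cmb_size_unit(cmbsz) *
                                (uint64_t)demo_cmb_size(cmbsz)));
    return 0;
}
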
+
+static void nvme_map_cmb(struct nvme_dev *dev)
+{
+ u64 size, offset;
resource_size_t bar_size;
struct pci_dev *pdev = to_pci_dev(dev->dev);
- void __iomem *cmb;
int bar;
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
- if (!(NVME_CMB_SZ(dev->cmbsz)))
- return NULL;
+ if (!dev->cmbsz)
+ return;
dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
if (!use_cmb_sqes)
- return NULL;
+ return;
- szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
- size = szu * NVME_CMB_SZ(dev->cmbsz);
- offset = szu * NVME_CMB_OFST(dev->cmbloc);
+ size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
+ offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
bar = NVME_CMB_BIR(dev->cmbloc);
bar_size = pci_resource_len(pdev, bar);
if (offset > bar_size)
- return NULL;
+ return;
/*
* Controllers may support a CMB size larger than their BAR,
@@ -1696,13 +1697,16 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
if (size > bar_size - offset)
size = bar_size - offset;
- cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
- if (!cmb)
- return NULL;
-
+ dev->cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
+ if (!dev->cmb)
+ return;
dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
dev->cmb_size = size;
- return cmb;
+
+ if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
+ &dev_attr_cmb.attr, NULL))
+ dev_warn(dev->ctrl.device,
+ "failed to add sysfs attribute for CMB\n");
}
static inline void nvme_release_cmb(struct nvme_dev *dev)
@@ -1770,7 +1774,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
dma_addr_t descs_dma;
int i = 0;
void **bufs;
- u64 size = 0, tmp;
+ u64 size, tmp;
tmp = (preferred + chunk_size - 1);
do_div(tmp, chunk_size);
@@ -1853,7 +1857,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
u64 preferred = (u64)dev->ctrl.hmpre * 4096;
u64 min = (u64)dev->ctrl.hmmin * 4096;
u32 enable_bits = NVME_HOST_MEM_ENABLE;
- int ret = 0;
+ int ret;
preferred = min(preferred, max);
if (min > max) {
@@ -1894,7 +1898,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
- struct nvme_queue *adminq = dev->queues[0];
+ struct nvme_queue *adminq = &dev->queues[0];
struct pci_dev *pdev = to_pci_dev(dev->dev);
int result, nr_io_queues;
unsigned long size;
@@ -1907,7 +1911,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (nr_io_queues == 0)
return 0;
- if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
+ if (dev->cmb && (dev->cmbsz & NVME_CMBSZ_SQS)) {
result = nvme_cmb_qdepth(dev, nr_io_queues,
sizeof(struct nvme_command));
if (result > 0)
@@ -2007,9 +2011,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
return 0;
}
-static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
+static void nvme_disable_io_queues(struct nvme_dev *dev)
{
- int pass;
+ int pass, queues = dev->online_queues - 1;
unsigned long timeout;
u8 opcode = nvme_admin_delete_sq;
@@ -2020,7 +2024,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
retry:
timeout = ADMIN_TIMEOUT;
for (; i > 0; i--, sent++)
- if (nvme_delete_queue(dev->queues[i], opcode))
+ if (nvme_delete_queue(&dev->queues[i], opcode))
break;
while (sent--) {
@@ -2035,13 +2039,12 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
}
/*
- * Return: error value if an error occurred setting up the queues or calling
- * Identify Device. 0 if these succeeded, even if adding some of the
- * namespaces failed. At the moment, these failures are silent. TBD which
- * failures should be reported.
+ * Return an error only if the tagset allocation failed.
*/
static int nvme_dev_add(struct nvme_dev *dev)
{
+ int ret;
+
if (!dev->ctrl.tagset) {
dev->tagset.ops = &nvme_mq_ops;
dev->tagset.nr_hw_queues = dev->online_queues - 1;
@@ -2057,8 +2060,12 @@ static int nvme_dev_add(struct nvme_dev *dev)
dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
dev->tagset.driver_data = dev;
- if (blk_mq_alloc_tag_set(&dev->tagset))
- return 0;
+ ret = blk_mq_alloc_tag_set(&dev->tagset);
+ if (ret) {
+ dev_warn(dev->ctrl.device,
+ "IO queues tagset allocation failed %d\n", ret);
+ return ret;
+ }
dev->ctrl.tagset = &dev->tagset;
nvme_dbbuf_set(dev);
@@ -2124,22 +2131,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
"set queue depth=%u\n", dev->q_depth);
}
- /*
- * CMBs can currently only exist on >=1.2 PCIe devices. We only
- * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group
- * has no name we can pass NULL as final argument to
- * sysfs_add_file_to_group.
- */
-
- if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
- dev->cmb = nvme_map_cmb(dev);
- if (dev->cmb) {
- if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
- &dev_attr_cmb.attr, NULL))
- dev_warn(dev->ctrl.device,
- "failed to add sysfs attribute for CMB\n");
- }
- }
+ nvme_map_cmb(dev);
pci_enable_pcie_error_reporting(pdev);
pci_save_state(pdev);
@@ -2172,7 +2164,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
- int i, queues;
+ int i;
bool dead = true;
struct pci_dev *pdev = to_pci_dev(dev->dev);
@@ -2207,21 +2199,13 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
}
nvme_stop_queues(&dev->ctrl);
- queues = dev->online_queues - 1;
- for (i = dev->ctrl.queue_count - 1; i > 0; i--)
- nvme_suspend_queue(dev->queues[i]);
-
- if (dead) {
- /* A device might become IO incapable very soon during
- * probe, before the admin queue is configured. Thus,
- * queue_count can be 0 here.
- */
- if (dev->ctrl.queue_count)
- nvme_suspend_queue(dev->queues[0]);
- } else {
- nvme_disable_io_queues(dev, queues);
+ if (!dead) {
+ nvme_disable_io_queues(dev);
nvme_disable_admin_queue(dev, shutdown);
}
+ for (i = dev->ctrl.queue_count - 1; i >= 0; i--)
+ nvme_suspend_queue(&dev->queues[i]);
+
nvme_pci_disable(dev);
blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
@@ -2291,6 +2275,7 @@ static void nvme_reset_work(struct work_struct *work)
container_of(work, struct nvme_dev, ctrl.reset_work);
bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
int result = -ENODEV;
+ enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;
if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
goto out;
@@ -2302,6 +2287,16 @@ static void nvme_reset_work(struct work_struct *work)
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
nvme_dev_disable(dev, false);
+ /*
+ * Reuse the RECONNECTING state from the nvme-fc/rdma transports to mark
+ * the controller-initialization phase of this reset.
+ */
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RECONNECTING)) {
+ dev_warn(dev->ctrl.device,
+ "failed to mark controller RECONNECTING\n");
+ goto out;
+ }
+
result = nvme_pci_enable(dev);
if (result)
goto out;
@@ -2354,15 +2349,23 @@ static void nvme_reset_work(struct work_struct *work)
dev_warn(dev->ctrl.device, "IO queues not created\n");
nvme_kill_queues(&dev->ctrl);
nvme_remove_namespaces(&dev->ctrl);
+ new_state = NVME_CTRL_ADMIN_ONLY;
} else {
nvme_start_queues(&dev->ctrl);
nvme_wait_freeze(&dev->ctrl);
- nvme_dev_add(dev);
+ /* hit this only when tagset allocation fails */
+ if (nvme_dev_add(dev))
+ new_state = NVME_CTRL_ADMIN_ONLY;
nvme_unfreeze(&dev->ctrl);
}
- if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
- dev_warn(dev->ctrl.device, "failed to mark controller live\n");
+ /*
+ * If only the admin queue is live, keep the controller around for
+ * further investigation or recovery.
+ */
+ if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
+ dev_warn(dev->ctrl.device,
+ "failed to mark controller state %d\n", new_state);
goto out;
}
@@ -2470,8 +2473,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)
return -ENOMEM;
- dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
- GFP_KERNEL, node);
+
+ dev->queues = kcalloc_node(num_possible_cpus() + 1,
+ sizeof(struct nvme_queue), GFP_KERNEL, node);
if (!dev->queues)
goto free;
@@ -2498,10 +2502,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto release_pools;
- nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
- queue_work(nvme_wq, &dev->ctrl.reset_work);
+ nvme_reset_ctrl(&dev->ctrl);
+
return 0;
release_pools:
@@ -2525,7 +2529,7 @@ static void nvme_reset_prepare(struct pci_dev *pdev)
static void nvme_reset_done(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_reset_ctrl(&dev->ctrl);
+ nvme_reset_ctrl_sync(&dev->ctrl);
}
static void nvme_shutdown(struct pci_dev *pdev)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 37af565..2bc059f 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -66,7 +66,6 @@ struct nvme_rdma_request {
struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
u32 num_sge;
int nents;
- bool inline_data;
struct ib_reg_wr reg_wr;
struct ib_cqe reg_cqe;
struct nvme_rdma_queue *queue;
@@ -974,12 +973,18 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
nvme_start_queues(&ctrl->ctrl);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+ /* state change failure should never happen */
+ WARN_ON_ONCE(1);
+ return;
+ }
+
nvme_rdma_reconnect_or_remove(ctrl);
}
static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
{
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING))
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
return;
queue_work(nvme_wq, &ctrl->err_work);
@@ -1086,7 +1091,6 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
sg->length = cpu_to_le32(sg_dma_len(req->sg_table.sgl));
sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
- req->inline_data = true;
req->num_sge++;
return 0;
}
@@ -1158,7 +1162,6 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
int count, ret;
req->num_sge = 1;
- req->inline_data = false;
refcount_set(&req->ref, 2); /* send and recv completions */
c->common.flags |= NVME_CMD_SGL_METABUF;
@@ -1753,6 +1756,12 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
nvme_stop_ctrl(&ctrl->ctrl);
nvme_rdma_shutdown_ctrl(ctrl, false);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+ /* state change failure should never happen */
+ WARN_ON_ONCE(1);
+ return;
+ }
+
ret = nvme_rdma_configure_admin_queue(ctrl, false);
if (ret)
goto out_fail;
@@ -2006,6 +2015,7 @@ out_free_ctrl:
static struct nvmf_transport_ops nvme_rdma_transport = {
.name = "rdma",
+ .module = THIS_MODULE,
.required_opts = NVMF_OPT_TRADDR,
.allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO,
@@ -2028,7 +2038,7 @@ static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
}
mutex_unlock(&nvme_rdma_ctrl_mutex);
- flush_workqueue(nvme_wq);
+ flush_workqueue(nvme_delete_wq);
}
static struct ib_client nvme_rdma_ib_client = {
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
new file mode 100644
index 0000000..41944bb
--- /dev/null
+++ b/drivers/nvme/host/trace.c
@@ -0,0 +1,127 @@
+/*
+ * NVM Express device driver tracepoints
+ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <asm/unaligned.h>
+#include "trace.h"
+
+static const char *nvme_trace_create_sq(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u16 sqid = get_unaligned_le16(cdw10);
+ u16 qsize = get_unaligned_le16(cdw10 + 2);
+ u16 sq_flags = get_unaligned_le16(cdw10 + 4);
+ u16 cqid = get_unaligned_le16(cdw10 + 6);
+
+ trace_seq_printf(p, "sqid=%u, qsize=%u, sq_flags=0x%x, cqid=%u",
+ sqid, qsize, sq_flags, cqid);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_create_cq(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u16 cqid = get_unaligned_le16(cdw10);
+ u16 qsize = get_unaligned_le16(cdw10 + 2);
+ u16 cq_flags = get_unaligned_le16(cdw10 + 4);
+ u16 irq_vector = get_unaligned_le16(cdw10 + 6);
+
+ trace_seq_printf(p, "cqid=%u, qsize=%u, cq_flags=0x%x, irq_vector=%u",
+ cqid, qsize, cq_flags, irq_vector);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 cns = cdw10[0];
+ u16 ctrlid = get_unaligned_le16(cdw10 + 2);
+
+ trace_seq_printf(p, "cns=%u, ctrlid=%u", cns, ctrlid);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u16 length = get_unaligned_le16(cdw10 + 8);
+ u16 control = get_unaligned_le16(cdw10 + 10);
+ u32 dsmgmt = get_unaligned_le32(cdw10 + 12);
+ u32 reftag = get_unaligned_le32(cdw10 + 16);
+
+ trace_seq_printf(p,
+ "slba=%llu, len=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u",
+ slba, length, control, dsmgmt, reftag);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "nr=%u, attributes=%u",
+ get_unaligned_le32(cdw10),
+ get_unaligned_le32(cdw10 + 4));
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_common(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "cdw10=%*ph", 24, cdw10);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
+ u8 opcode, u8 *cdw10)
+{
+ switch (opcode) {
+ case nvme_admin_create_sq:
+ return nvme_trace_create_sq(p, cdw10);
+ case nvme_admin_create_cq:
+ return nvme_trace_create_cq(p, cdw10);
+ case nvme_admin_identify:
+ return nvme_trace_admin_identify(p, cdw10);
+ default:
+ return nvme_trace_common(p, cdw10);
+ }
+}
+
+const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
+ u8 opcode, u8 *cdw10)
+{
+ switch (opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_write:
+ case nvme_cmd_write_zeroes:
+ return nvme_trace_read_write(p, cdw10);
+ case nvme_cmd_dsm:
+ return nvme_trace_dsm(p, cdw10);
+ default:
+ return nvme_trace_common(p, cdw10);
+ }
+}
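
The parse helpers above read the 24 raw bytes of CDW10..CDW15 straight out of the submission queue entry; for reads and writes that layout starts with a little-endian 64-bit SLBA followed by a 16-bit 0-based length. A standalone decode of that layout (the buffer contents are invented):

#include <stdint.h>
#include <stdio.h>

static uint64_t demo_le64(const uint8_t *p)
{
    uint64_t v = 0;
    for (int i = 7; i >= 0; i--)
        v = (v << 8) | p[i];
    return v;
}

static uint16_t demo_le16(const uint8_t *p)
{
    return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
    uint8_t cdw10[24] = { 0 };

    /* slba = 0x1000, length = 7 (i.e. 8 sectors, 0-based) */
    cdw10[1] = 0x10;
    cdw10[8] = 0x07;

    printf("slba=%llu, len=%u\n",
           (unsigned long long)demo_le64(cdw10), demo_le16(cdw10 + 8));
    return 0;
}
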
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
new file mode 100644
index 0000000..ea91fcc
--- /dev/null
+++ b/drivers/nvme/host/trace.h
@@ -0,0 +1,163 @@
+/*
+ * NVM Express device driver tracepoints
+ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nvme
+
+#if !defined(_TRACE_NVME_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NVME_H
+
+#include <linux/nvme.h>
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "nvme.h"
+
+#define nvme_admin_opcode_name(opcode) { opcode, #opcode }
+#define show_admin_opcode_name(val) \
+ __print_symbolic(val, \
+ nvme_admin_opcode_name(nvme_admin_delete_sq), \
+ nvme_admin_opcode_name(nvme_admin_create_sq), \
+ nvme_admin_opcode_name(nvme_admin_get_log_page), \
+ nvme_admin_opcode_name(nvme_admin_delete_cq), \
+ nvme_admin_opcode_name(nvme_admin_create_cq), \
+ nvme_admin_opcode_name(nvme_admin_identify), \
+ nvme_admin_opcode_name(nvme_admin_abort_cmd), \
+ nvme_admin_opcode_name(nvme_admin_set_features), \
+ nvme_admin_opcode_name(nvme_admin_get_features), \
+ nvme_admin_opcode_name(nvme_admin_async_event), \
+ nvme_admin_opcode_name(nvme_admin_ns_mgmt), \
+ nvme_admin_opcode_name(nvme_admin_activate_fw), \
+ nvme_admin_opcode_name(nvme_admin_download_fw), \
+ nvme_admin_opcode_name(nvme_admin_ns_attach), \
+ nvme_admin_opcode_name(nvme_admin_keep_alive), \
+ nvme_admin_opcode_name(nvme_admin_directive_send), \
+ nvme_admin_opcode_name(nvme_admin_directive_recv), \
+ nvme_admin_opcode_name(nvme_admin_dbbuf), \
+ nvme_admin_opcode_name(nvme_admin_format_nvm), \
+ nvme_admin_opcode_name(nvme_admin_security_send), \
+ nvme_admin_opcode_name(nvme_admin_security_recv), \
+ nvme_admin_opcode_name(nvme_admin_sanitize_nvm))
+
+const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+#define __parse_nvme_admin_cmd(opcode, cdw10) \
+ nvme_trace_parse_admin_cmd(p, opcode, cdw10)
+
+#define nvme_opcode_name(opcode) { opcode, #opcode }
+#define show_opcode_name(val) \
+ __print_symbolic(val, \
+ nvme_opcode_name(nvme_cmd_flush), \
+ nvme_opcode_name(nvme_cmd_write), \
+ nvme_opcode_name(nvme_cmd_read), \
+ nvme_opcode_name(nvme_cmd_write_uncor), \
+ nvme_opcode_name(nvme_cmd_compare), \
+ nvme_opcode_name(nvme_cmd_write_zeroes), \
+ nvme_opcode_name(nvme_cmd_dsm), \
+ nvme_opcode_name(nvme_cmd_resv_register), \
+ nvme_opcode_name(nvme_cmd_resv_report), \
+ nvme_opcode_name(nvme_cmd_resv_acquire), \
+ nvme_opcode_name(nvme_cmd_resv_release))
+
+const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+#define __parse_nvme_cmd(opcode, cdw10) \
+ nvme_trace_parse_nvm_cmd(p, opcode, cdw10)
+
+TRACE_EVENT(nvme_setup_admin_cmd,
+ TP_PROTO(struct nvme_command *cmd),
+ TP_ARGS(cmd),
+ TP_STRUCT__entry(
+ __field(u8, opcode)
+ __field(u8, flags)
+ __field(u16, cid)
+ __field(u64, metadata)
+ __array(u8, cdw10, 24)
+ ),
+ TP_fast_assign(
+ __entry->opcode = cmd->common.opcode;
+ __entry->flags = cmd->common.flags;
+ __entry->cid = cmd->common.command_id;
+ __entry->metadata = le64_to_cpu(cmd->common.metadata);
+ memcpy(__entry->cdw10, cmd->common.cdw10,
+ sizeof(__entry->cdw10));
+ ),
+ TP_printk(" cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+ __entry->cid, __entry->flags, __entry->metadata,
+ show_admin_opcode_name(__entry->opcode),
+ __parse_nvme_admin_cmd(__entry->opcode, __entry->cdw10))
+);
+
+TRACE_EVENT(nvme_setup_nvm_cmd,
+ TP_PROTO(int qid, struct nvme_command *cmd),
+ TP_ARGS(qid, cmd),
+ TP_STRUCT__entry(
+ __field(int, qid)
+ __field(u8, opcode)
+ __field(u8, flags)
+ __field(u16, cid)
+ __field(u32, nsid)
+ __field(u64, metadata)
+ __array(u8, cdw10, 24)
+ ),
+ TP_fast_assign(
+ __entry->qid = qid;
+ __entry->opcode = cmd->common.opcode;
+ __entry->flags = cmd->common.flags;
+ __entry->cid = cmd->common.command_id;
+ __entry->nsid = le32_to_cpu(cmd->common.nsid);
+ __entry->metadata = le64_to_cpu(cmd->common.metadata);
+ memcpy(__entry->cdw10, cmd->common.cdw10,
+ sizeof(__entry->cdw10));
+ ),
+ TP_printk("qid=%d, nsid=%u, cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+ __entry->qid, __entry->nsid, __entry->cid,
+ __entry->flags, __entry->metadata,
+ show_opcode_name(__entry->opcode),
+ __parse_nvme_cmd(__entry->opcode, __entry->cdw10))
+);
+
+TRACE_EVENT(nvme_complete_rq,
+ TP_PROTO(struct request *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+ __field(int, qid)
+ __field(int, cid)
+ __field(u64, result)
+ __field(u8, retries)
+ __field(u8, flags)
+ __field(u16, status)
+ ),
+ TP_fast_assign(
+ __entry->qid = req->q->id;
+ __entry->cid = req->tag;
+ __entry->result = le64_to_cpu(nvme_req(req)->result.u64);
+ __entry->retries = nvme_req(req)->retries;
+ __entry->flags = nvme_req(req)->flags;
+ __entry->status = nvme_req(req)->status;
+ ),
+ TP_printk("cmdid=%u, qid=%d, res=%llu, retries=%u, flags=0x%x, status=%u",
+ __entry->cid, __entry->qid, __entry->result,
+ __entry->retries, __entry->flags, __entry->status)
+);
+
+#endif /* _TRACE_NVME_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 03e4ab6..5f4f8b1 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -29,6 +29,7 @@ config NVME_TARGET_RDMA
tristate "NVMe over Fabrics RDMA target support"
depends on INFINIBAND
depends on NVME_TARGET
+ select SGL_ALLOC
help
This enables the NVMe RDMA target support, which allows exporting NVMe
devices over RDMA.
@@ -39,6 +40,7 @@ config NVME_TARGET_FC
tristate "NVMe over Fabrics FC target driver"
depends on NVME_TARGET
depends on HAS_DMA
+ select SGL_ALLOC
help
This enables the NVMe FC target support, which allows exporting NVMe
devices over FC.
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b54748a..0bd7371 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -512,6 +512,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->sg_cnt = 0;
req->transfer_len = 0;
req->rsp->status = 0;
+ req->ns = NULL;
/* no support for fused commands yet */
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
@@ -557,6 +558,8 @@ EXPORT_SYMBOL_GPL(nvmet_req_init);
void nvmet_req_uninit(struct nvmet_req *req)
{
percpu_ref_put(&req->sq->ref);
+ if (req->ns)
+ nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
@@ -830,7 +833,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
/* Don't accept keep-alive timeout for discovery controllers */
if (kato) {
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- goto out_free_sqs;
+ goto out_remove_ida;
}
/*
@@ -860,6 +863,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
*ctrlp = ctrl;
return 0;
+out_remove_ida:
+ ida_simple_remove(&cntlid_ida, ctrl->cntlid);
out_free_sqs:
kfree(ctrl->sqs);
out_free_cqs:
@@ -877,21 +882,22 @@ static void nvmet_ctrl_free(struct kref *ref)
struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
struct nvmet_subsys *subsys = ctrl->subsys;
- nvmet_stop_keep_alive_timer(ctrl);
-
mutex_lock(&subsys->lock);
list_del(&ctrl->subsys_entry);
mutex_unlock(&subsys->lock);
+ nvmet_stop_keep_alive_timer(ctrl);
+
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fatal_err_work);
ida_simple_remove(&cntlid_ida, ctrl->cntlid);
- nvmet_subsys_put(subsys);
kfree(ctrl->sqs);
kfree(ctrl->cqs);
kfree(ctrl);
+
+ nvmet_subsys_put(subsys);
}
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index db3bf6b..19e9e42 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -225,7 +225,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
goto out_ctrl_put;
}
- pr_info("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
+ pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
out:
kfree(d);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 5fd8603..9b39a6c 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1697,31 +1697,12 @@ static int
nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
struct scatterlist *sg;
- struct page *page;
unsigned int nent;
- u32 page_len, length;
- int i = 0;
- length = fod->req.transfer_len;
- nent = DIV_ROUND_UP(length, PAGE_SIZE);
- sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
+ sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
if (!sg)
goto out;
- sg_init_table(sg, nent);
-
- while (length) {
- page_len = min_t(u32, length, PAGE_SIZE);
-
- page = alloc_page(GFP_KERNEL);
- if (!page)
- goto out_free_pages;
-
- sg_set_page(&sg[i], page, page_len, 0);
- length -= page_len;
- i++;
- }
-
fod->data_sg = sg;
fod->data_sg_cnt = nent;
fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
@@ -1731,14 +1712,6 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
return 0;
-out_free_pages:
- while (i > 0) {
- i--;
- __free_page(sg_page(&sg[i]));
- }
- kfree(sg);
- fod->data_sg = NULL;
- fod->data_sg_cnt = 0;
out:
return NVME_SC_INTERNAL;
}
@@ -1746,18 +1719,13 @@ out:
static void
nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
- struct scatterlist *sg;
- int count;
-
if (!fod->data_sg || !fod->data_sg_cnt)
return;
fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
((fod->io_dir == NVMET_FCP_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE));
- for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
- __free_page(sg_page(sg));
- kfree(fod->data_sg);
+ sgl_free(fod->data_sg);
fod->data_sg = NULL;
fod->data_sg_cnt = 0;
}
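
The target FC code now leans on the lib/scatterlist helpers pulled in by the new "select SGL_ALLOC": sgl_alloc() builds a scatterlist covering the requested length and sgl_free() releases both the pages and the table, which is what made the hand-rolled alloc_page() loop and its unwind path removable. The intended usage shape (a kernel-side sketch, error handling abbreviated; the demo_* wrappers are illustrative):

#include <linux/scatterlist.h>
#include <linux/gfp.h>

/* Allocate a scatterlist covering 'length' bytes; sgl_alloc() grabs the
 * pages and builds the table in one call. */
static struct scatterlist *demo_alloc_sgl(u32 length, unsigned int *nentp)
{
    return sgl_alloc(length, GFP_KERNEL, nentp);
}

/* sgl_free() releases both the pages and the scatterlist itself. */
static void demo_free_sgl(struct scatterlist *sg)
{
    sgl_free(sg);
}
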
@@ -2522,14 +2490,8 @@ nvmet_fc_add_port(struct nvmet_port *port)
list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
if ((tgtport->fc_target_port.node_name == traddr.nn) &&
(tgtport->fc_target_port.port_name == traddr.pn)) {
- /* a FC port can only be 1 nvmet port id */
- if (!tgtport->port) {
- tgtport->port = port;
- port->priv = tgtport;
- nvmet_fc_tgtport_get(tgtport);
- ret = 0;
- } else
- ret = -EALREADY;
+ tgtport->port = port;
+ ret = 0;
break;
}
}
@@ -2540,19 +2502,7 @@ nvmet_fc_add_port(struct nvmet_port *port)
static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
- struct nvmet_fc_tgtport *tgtport = port->priv;
- unsigned long flags;
- bool matched = false;
-
- spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
- if (tgtport->port == port) {
- matched = true;
- tgtport->port = NULL;
- }
- spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
-
- if (matched)
- nvmet_fc_tgtport_put(tgtport);
+ /* nothing to do */
}
static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 7b75d9d..34712de 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -204,6 +204,10 @@ struct fcloop_lport {
struct completion unreg_done;
};
+struct fcloop_lport_priv {
+ struct fcloop_lport *lport;
+};
+
struct fcloop_rport {
struct nvme_fc_remote_port *remoteport;
struct nvmet_fc_target_port *targetport;
@@ -238,21 +242,32 @@ struct fcloop_lsreq {
int status;
};
+enum {
+ INI_IO_START = 0,
+ INI_IO_ACTIVE = 1,
+ INI_IO_ABORTED = 2,
+ INI_IO_COMPLETED = 3,
+};
+
struct fcloop_fcpreq {
struct fcloop_tport *tport;
struct nvmefc_fcp_req *fcpreq;
spinlock_t reqlock;
u16 status;
+ u32 inistate;
bool active;
bool aborted;
- struct work_struct work;
+ struct kref ref;
+ struct work_struct fcp_rcv_work;
+ struct work_struct abort_rcv_work;
+ struct work_struct tio_done_work;
struct nvmefc_tgt_fcp_req tgt_fcp_req;
};
struct fcloop_ini_fcpreq {
struct nvmefc_fcp_req *fcpreq;
struct fcloop_fcpreq *tfcp_req;
- struct work_struct iniwork;
+ spinlock_t inilock;
};
static inline struct fcloop_lsreq *
@@ -343,17 +358,122 @@ fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
return 0;
}
-/*
- * FCP IO operation done by initiator abort.
- * call back up initiator "done" flows.
- */
static void
-fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
+fcloop_tfcp_req_free(struct kref *ref)
+{
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(ref, struct fcloop_fcpreq, ref);
+
+ kfree(tfcp_req);
+}
+
+static void
+fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
+{
+ kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
+}
+
+static int
+fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
+{
+ return kref_get_unless_zero(&tfcp_req->ref);
+}
+
+static void
+fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
+ struct fcloop_fcpreq *tfcp_req, int status)
+{
+ struct fcloop_ini_fcpreq *inireq = NULL;
+
+ if (fcpreq) {
+ inireq = fcpreq->private;
+ spin_lock(&inireq->inilock);
+ inireq->tfcp_req = NULL;
+ spin_unlock(&inireq->inilock);
+
+ fcpreq->status = status;
+ fcpreq->done(fcpreq);
+ }
+
+ /* release original io reference on tgt struct */
+ fcloop_tfcp_req_put(tfcp_req);
+}
+
+static void
+fcloop_fcp_recv_work(struct work_struct *work)
{
- struct fcloop_ini_fcpreq *inireq =
- container_of(work, struct fcloop_ini_fcpreq, iniwork);
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
+ struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+ int ret = 0;
+ bool aborted = false;
- inireq->fcpreq->done(inireq->fcpreq);
+ spin_lock(&tfcp_req->reqlock);
+ switch (tfcp_req->inistate) {
+ case INI_IO_START:
+ tfcp_req->inistate = INI_IO_ACTIVE;
+ break;
+ case INI_IO_ABORTED:
+ aborted = true;
+ break;
+ default:
+ spin_unlock(&tfcp_req->reqlock);
+ WARN_ON(1);
+ return;
+ }
+ spin_unlock(&tfcp_req->reqlock);
+
+ if (unlikely(aborted))
+ ret = -ECANCELED;
+ else
+ ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
+ &tfcp_req->tgt_fcp_req,
+ fcpreq->cmdaddr, fcpreq->cmdlen);
+ if (ret)
+ fcloop_call_host_done(fcpreq, tfcp_req, ret);
+
+ return;
+}
+
+static void
+fcloop_fcp_abort_recv_work(struct work_struct *work)
+{
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(work, struct fcloop_fcpreq, abort_rcv_work);
+ struct nvmefc_fcp_req *fcpreq;
+ bool completed = false;
+
+ spin_lock(&tfcp_req->reqlock);
+ fcpreq = tfcp_req->fcpreq;
+ switch (tfcp_req->inistate) {
+ case INI_IO_ABORTED:
+ break;
+ case INI_IO_COMPLETED:
+ completed = true;
+ break;
+ default:
+ spin_unlock(&tfcp_req->reqlock);
+ WARN_ON(1);
+ return;
+ }
+ spin_unlock(&tfcp_req->reqlock);
+
+ if (unlikely(completed)) {
+ /* remove reference taken in original abort downcall */
+ fcloop_tfcp_req_put(tfcp_req);
+ return;
+ }
+
+ if (tfcp_req->tport->targetport)
+ nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
+ &tfcp_req->tgt_fcp_req);
+
+ spin_lock(&tfcp_req->reqlock);
+ tfcp_req->fcpreq = NULL;
+ spin_unlock(&tfcp_req->reqlock);
+
+ fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
+ /* call_host_done releases reference for abort downcall */
}
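
The fcloop rework here hangs the request lifetime on a kref: the kref_init() in fcloop_fcp_req() below stands for the outstanding io, kref_get_unless_zero() lets the abort path race safely with completion, and the final put frees the request. A minimal sketch of the generic idiom (hypothetical demo_req type, not fcloop code):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_req {
        struct kref ref;
        /* request payload would live here */
};

static void demo_req_free(struct kref *ref)
{
        kfree(container_of(ref, struct demo_req, ref));
}

/* Abort/completion race: take a reference only if the request is
 * still live, exactly what fcloop_tfcp_req_get() does above. */
static bool demo_req_get(struct demo_req *req)
{
        return kref_get_unless_zero(&req->ref);
}

/* Each path (io done, abort) drops its own reference; the request is
 * freed only when the last reference goes away. */
static void demo_req_put(struct demo_req *req)
{
        kref_put(&req->ref, demo_req_free);
}
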
/*
@@ -364,20 +484,15 @@ static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
struct fcloop_fcpreq *tfcp_req =
- container_of(work, struct fcloop_fcpreq, work);
- struct fcloop_tport *tport = tfcp_req->tport;
+ container_of(work, struct fcloop_fcpreq, tio_done_work);
struct nvmefc_fcp_req *fcpreq;
spin_lock(&tfcp_req->reqlock);
fcpreq = tfcp_req->fcpreq;
+ tfcp_req->inistate = INI_IO_COMPLETED;
spin_unlock(&tfcp_req->reqlock);
- if (tport->remoteport && fcpreq) {
- fcpreq->status = tfcp_req->status;
- fcpreq->done(fcpreq);
- }
-
- kfree(tfcp_req);
+ fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}
@@ -390,7 +505,6 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
struct fcloop_rport *rport = remoteport->private;
struct fcloop_ini_fcpreq *inireq = fcpreq->private;
struct fcloop_fcpreq *tfcp_req;
- int ret = 0;
if (!rport->targetport)
return -ECONNREFUSED;
@@ -401,16 +515,20 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
inireq->fcpreq = fcpreq;
inireq->tfcp_req = tfcp_req;
- INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
+ spin_lock_init(&inireq->inilock);
+
tfcp_req->fcpreq = fcpreq;
tfcp_req->tport = rport->targetport->private;
+ tfcp_req->inistate = INI_IO_START;
spin_lock_init(&tfcp_req->reqlock);
- INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);
+ INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
+ INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
+ INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
+ kref_init(&tfcp_req->ref);
- ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
- fcpreq->cmdaddr, fcpreq->cmdlen);
+ schedule_work(&tfcp_req->fcp_rcv_work);
- return ret;
+ return 0;
}
static void
@@ -589,7 +707,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
{
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
- schedule_work(&tfcp_req->work);
+ schedule_work(&tfcp_req->tio_done_work);
}
static void
@@ -605,27 +723,47 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
void *hw_queue_handle,
struct nvmefc_fcp_req *fcpreq)
{
- struct fcloop_rport *rport = remoteport->private;
struct fcloop_ini_fcpreq *inireq = fcpreq->private;
- struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;
+ struct fcloop_fcpreq *tfcp_req;
+ bool abortio = true;
+
+ spin_lock(&inireq->inilock);
+ tfcp_req = inireq->tfcp_req;
+ if (tfcp_req)
+ fcloop_tfcp_req_get(tfcp_req);
+ spin_unlock(&inireq->inilock);
if (!tfcp_req)
/* abort has already been called */
return;
- if (rport->targetport)
- nvmet_fc_rcv_fcp_abort(rport->targetport,
- &tfcp_req->tgt_fcp_req);
-
/* break initiator/target relationship for io */
spin_lock(&tfcp_req->reqlock);
- inireq->tfcp_req = NULL;
- tfcp_req->fcpreq = NULL;
+ switch (tfcp_req->inistate) {
+ case INI_IO_START:
+ case INI_IO_ACTIVE:
+ tfcp_req->inistate = INI_IO_ABORTED;
+ break;
+ case INI_IO_COMPLETED:
+ abortio = false;
+ break;
+ default:
+ spin_unlock(&tfcp_req->reqlock);
+ WARN_ON(1);
+ return;
+ }
spin_unlock(&tfcp_req->reqlock);
- /* post the aborted io completion */
- fcpreq->status = -ECANCELED;
- schedule_work(&inireq->iniwork);
+ if (abortio)
+ /* leave the reference while the work item is scheduled */
+ WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+ else {
+ /*
+ * the done callback has already been made for this io, so
+ * there is nothing more to do; release the reference taken above
+ */
+ fcloop_tfcp_req_put(tfcp_req);
+ }
}
static void
@@ -657,7 +795,8 @@ fcloop_nport_get(struct fcloop_nport *nport)
static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
- struct fcloop_lport *lport = localport->private;
+ struct fcloop_lport_priv *lport_priv = localport->private;
+ struct fcloop_lport *lport = lport_priv->lport;
/* release any threads waiting for the unreg to complete */
complete(&lport->unreg_done);
@@ -697,7 +836,7 @@ static struct nvme_fc_port_template fctemplate = {
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
.dma_boundary = FCLOOP_DMABOUND_4G,
/* sizes of additional private data for data structures */
- .local_priv_sz = sizeof(struct fcloop_lport),
+ .local_priv_sz = sizeof(struct fcloop_lport_priv),
.remote_priv_sz = sizeof(struct fcloop_rport),
.lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
.fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
@@ -714,8 +853,7 @@ static struct nvmet_fc_target_template tgttemplate = {
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
.dma_boundary = FCLOOP_DMABOUND_4G,
/* optional features */
- .target_features = NVMET_FCTGTFEAT_CMD_IN_ISR |
- NVMET_FCTGTFEAT_OPDONE_IN_ISR,
+ .target_features = 0,
/* sizes of additional private data for data structures */
.target_priv_sz = sizeof(struct fcloop_tport),
};
@@ -728,11 +866,17 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
struct fcloop_ctrl_options *opts;
struct nvme_fc_local_port *localport;
struct fcloop_lport *lport;
- int ret;
+ struct fcloop_lport_priv *lport_priv;
+ unsigned long flags;
+ int ret = -ENOMEM;
+
+ lport = kzalloc(sizeof(*lport), GFP_KERNEL);
+ if (!lport)
+ return -ENOMEM;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
- return -ENOMEM;
+ goto out_free_lport;
ret = fcloop_parse_options(opts, buf);
if (ret)
@@ -752,23 +896,25 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
if (!ret) {
- unsigned long flags;
-
/* success */
- lport = localport->private;
+ lport_priv = localport->private;
+ lport_priv->lport = lport;
+
lport->localport = localport;
INIT_LIST_HEAD(&lport->lport_list);
spin_lock_irqsave(&fcloop_lock, flags);
list_add_tail(&lport->lport_list, &fcloop_lports);
spin_unlock_irqrestore(&fcloop_lock, flags);
-
- /* mark all of the input buffer consumed */
- ret = count;
}
out_free_opts:
kfree(opts);
+out_free_lport:
+ /* free only if we're going to fail */
+ if (ret)
+ kfree(lport);
+
return ret ? ret : count;
}
@@ -790,6 +936,8 @@ __wait_localport_unreg(struct fcloop_lport *lport)
wait_for_completion(&lport->unreg_done);
+ kfree(lport);
+
return ret;
}
@@ -1085,7 +1233,7 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct fcloop_nport *nport = NULL, *tmpport;
- struct fcloop_tport *tport;
+ struct fcloop_tport *tport = NULL;
u64 nodename, portname;
unsigned long flags;
int ret;
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 1e21b28..7991ec3 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -686,6 +686,7 @@ static struct nvmet_fabrics_ops nvme_loop_ops = {
static struct nvmf_transport_ops nvme_loop_transport = {
.name = "loop",
+ .module = THIS_MODULE,
.create_ctrl = nvme_loop_create_ctrl,
};
@@ -716,7 +717,7 @@ static void __exit nvme_loop_cleanup_module(void)
nvme_delete_ctrl(&ctrl->ctrl);
mutex_unlock(&nvme_loop_ctrl_mutex);
- flush_workqueue(nvme_wq);
+ flush_workqueue(nvme_delete_wq);
}
module_init(nvme_loop_init_module);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 4991290..978e169 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -185,59 +185,6 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}
-static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents)
-{
- struct scatterlist *sg;
- int count;
-
- if (!sgl || !nents)
- return;
-
- for_each_sg(sgl, sg, nents, count)
- __free_page(sg_page(sg));
- kfree(sgl);
-}
-
-static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
- u32 length)
-{
- struct scatterlist *sg;
- struct page *page;
- unsigned int nent;
- int i = 0;
-
- nent = DIV_ROUND_UP(length, PAGE_SIZE);
- sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
- if (!sg)
- goto out;
-
- sg_init_table(sg, nent);
-
- while (length) {
- u32 page_len = min_t(u32, length, PAGE_SIZE);
-
- page = alloc_page(GFP_KERNEL);
- if (!page)
- goto out_free_pages;
-
- sg_set_page(&sg[i], page, page_len, 0);
- length -= page_len;
- i++;
- }
- *sgl = sg;
- *nents = nent;
- return 0;
-
-out_free_pages:
- while (i > 0) {
- i--;
- __free_page(sg_page(&sg[i]));
- }
- kfree(sg);
-out:
- return NVME_SC_INTERNAL;
-}
-
static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_cmd *c, bool admin)
{
@@ -484,7 +431,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
}
if (rsp->req.sg != &rsp->cmd->inline_sg)
- nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt);
+ sgl_free(rsp->req.sg);
if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
nvmet_rdma_process_wr_wait_list(queue);
@@ -621,16 +568,14 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
u32 len = get_unaligned_le24(sgl->length);
u32 key = get_unaligned_le32(sgl->key);
int ret;
- u16 status;
/* no data command? */
if (!len)
return 0;
- status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
- len);
- if (status)
- return status;
+ rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
+ if (!rsp->req.sg)
+ return NVME_SC_INTERNAL;
ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
@@ -976,7 +921,7 @@ static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
- pr_info("freeing queue %d\n", queue->idx);
+ pr_debug("freeing queue %d\n", queue->idx);
nvmet_sq_destroy(&queue->nvme_sq);
@@ -1558,25 +1503,9 @@ err_ib_client:
static void __exit nvmet_rdma_exit(void)
{
- struct nvmet_rdma_queue *queue;
-
nvmet_unregister_transport(&nvmet_rdma_ops);
-
- flush_scheduled_work();
-
- mutex_lock(&nvmet_rdma_queue_mutex);
- while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list,
- struct nvmet_rdma_queue, queue_list))) {
- list_del_init(&queue->queue_list);
-
- mutex_unlock(&nvmet_rdma_queue_mutex);
- __nvmet_rdma_queue_disconnect(queue);
- mutex_lock(&nvmet_rdma_queue_mutex);
- }
- mutex_unlock(&nvmet_rdma_queue_mutex);
-
- flush_scheduled_work();
ib_unregister_client(&nvmet_rdma_ib_client);
+ WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
ida_destroy(&nvmet_rdma_queue_ida);
}
diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c
index a346b49..41d3a3c 100644
--- a/drivers/nvmem/meson-mx-efuse.c
+++ b/drivers/nvmem/meson-mx-efuse.c
@@ -156,8 +156,8 @@ static int meson_mx_efuse_read(void *context, unsigned int offset,
MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE,
MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE);
- for (i = offset; i < offset + bytes; i += efuse->config.word_size) {
- addr = i / efuse->config.word_size;
+ for (i = 0; i < bytes; i += efuse->config.word_size) {
+ addr = (offset + i) / efuse->config.word_size;
err = meson_mx_efuse_read_addr(efuse, addr, &tmp);
if (err)
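
Both loop forms compute the same efuse word address; the fix (a hedged reading, since the rest of the loop body is elided here) is that i is now a zero-based offset into the caller's buffer rather than an absolute efuse offset. A sketch with hypothetical names:

#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical stand-in for meson_mx_efuse_read_addr(). */
static int efuse_read_word(unsigned int addr, u32 *val)
{
        *val = 0;       /* the real driver reads the efuse data register */
        return 0;
}

/* Only the indexing matters here: i is now a zero-based byte offset
 * into buf, while (offset + i) still addresses the absolute word. The
 * old loop ran i from 'offset', so a copy at buf + i overshot a
 * 'bytes'-sized buffer whenever offset != 0. */
static int sketch_efuse_read(unsigned int offset, unsigned int bytes,
                             unsigned int word_size, void *buf)
{
        unsigned int i, addr;
        u32 tmp;
        int err;

        for (i = 0; i < bytes; i += word_size) {
                addr = (offset + i) / word_size;
                err = efuse_read_word(addr, &tmp);
                if (err)
                        return err;
                memcpy((u8 *)buf + i, &tmp, word_size);
        }
        return 0;
}
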
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 9825858..a327be1 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -81,6 +81,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
* can be looked up later */
of_node_get(child);
phy->mdio.dev.of_node = child;
+ phy->mdio.dev.fwnode = of_fwnode_handle(child);
/* All data is now stored in the phy struct;
* register it */
@@ -111,6 +112,7 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
*/
of_node_get(child);
mdiodev->dev.of_node = child;
+ mdiodev->dev.fwnode = of_fwnode_handle(child);
/* All data is now stored in the mdiodev struct; register it. */
rc = mdio_device_register(mdiodev);
@@ -206,6 +208,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
mdio->phy_mask = ~0;
mdio->dev.of_node = np;
+ mdio->dev.fwnode = of_fwnode_handle(np);
/* Get bus level PHY reset GPIO details */
mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY;
@@ -228,7 +231,12 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
rc = of_mdiobus_register_phy(mdio, child, addr);
else
rc = of_mdiobus_register_device(mdio, child, addr);
- if (rc)
+
+ if (rc == -ENODEV)
+ dev_err(&mdio->dev,
+ "MDIO device at address %d is missing.\n",
+ addr);
+ else if (rc)
goto unregister;
}
@@ -252,7 +260,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
if (of_mdiobus_child_is_phy(child)) {
rc = of_mdiobus_register_phy(mdio, child, addr);
- if (rc)
+ if (rc && rc != -ENODEV)
goto unregister;
}
}
diff --git a/drivers/opp/Makefile b/drivers/opp/Makefile
index e70ceb4..6ce6aef 100644
--- a/drivers/opp/Makefile
+++ b/drivers/opp/Makefile
@@ -2,3 +2,4 @@ ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
obj-y += core.o cpu.o
obj-$(CONFIG_OF) += of.o
obj-$(CONFIG_DEBUG_FS) += debugfs.o
+obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-opp-supply.o
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
new file mode 100644
index 0000000..370eff3
--- /dev/null
+++ b/drivers/opp/ti-opp-supply.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Nishanth Menon <nm@ti.com>
+ * Dave Gerlach <d-gerlach@ti.com>
+ *
+ * TI OPP supply driver that provides override into the regulator control
+ * for generic opp core to handle devices with ABB regulator and/or
+ * SmartReflex Class0.
+ */
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+/**
+ * struct ti_opp_supply_optimum_voltage_table - optimized voltage table
+ * @reference_uv: reference voltage (usually Nominal voltage)
+ * @optimized_uv: Optimized voltage from efuse
+ */
+struct ti_opp_supply_optimum_voltage_table {
+ unsigned int reference_uv;
+ unsigned int optimized_uv;
+};
+
+/**
+ * struct ti_opp_supply_data - OMAP specific opp supply data
+ * @vdd_table: Optimized voltage mapping table
+ * @num_vdd_table: number of entries in vdd_table
+ * @vdd_absolute_max_voltage_uv: absolute maximum voltage in UV for the supply
+ */
+struct ti_opp_supply_data {
+ struct ti_opp_supply_optimum_voltage_table *vdd_table;
+ u32 num_vdd_table;
+ u32 vdd_absolute_max_voltage_uv;
+};
+
+static struct ti_opp_supply_data opp_data;
+
+/**
+ * struct ti_opp_supply_of_data - device tree match data
+ * @flags: specific type of opp supply
+ * @efuse_voltage_mask: mask required for efuse register representing voltage
+ * @efuse_voltage_uv: Are the efuse entries in micro-volts? If not, assume
+ * milli-volts.
+ */
+struct ti_opp_supply_of_data {
+#define OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE BIT(1)
+#define OPPDM_HAS_NO_ABB BIT(2)
+ const u8 flags;
+ const u32 efuse_voltage_mask;
+ const bool efuse_voltage_uv;
+};
+
+/**
+ * _store_optimized_voltages() - store optimized voltages
+ * @dev: ti opp supply device for which we need to store info
+ * @data: data specific to the device
+ *
+ * Picks up the efuse-based optimized voltages for VDD, unique per device,
+ * and stores them in an internal data structure for use during transition
+ * requests.
+ *
+ * Return: If successful, 0, else appropriate error value.
+ */
+static int _store_optimized_voltages(struct device *dev,
+ struct ti_opp_supply_data *data)
+{
+ void __iomem *base;
+ struct property *prop;
+ struct resource *res;
+ const __be32 *val;
+ int proplen, i;
+ int ret = 0;
+ struct ti_opp_supply_optimum_voltage_table *table;
+ const struct ti_opp_supply_of_data *of_data = dev_get_drvdata(dev);
+
+ /* pick up Efuse based voltages */
+ res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Unable to get IO resource\n");
+ ret = -ENODEV;
+ goto out_map;
+ }
+
+ base = ioremap_nocache(res->start, resource_size(res));
+ if (!base) {
+ dev_err(dev, "Unable to map Efuse registers\n");
+ ret = -ENOMEM;
+ goto out_map;
+ }
+
+ /* Fetch efuse-settings. */
+ prop = of_find_property(dev->of_node, "ti,efuse-settings", NULL);
+ if (!prop) {
+ dev_err(dev, "No 'ti,efuse-settings' property found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ proplen = prop->length / sizeof(int);
+ data->num_vdd_table = proplen / 2;
+ /* Check for corrupted OPP entries in DT */
+ if (data->num_vdd_table * 2 * sizeof(int) != prop->length) {
+ dev_err(dev, "Invalid 'ti,efuse-settings'\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "ti,absolute-max-voltage-uv",
+ &data->vdd_absolute_max_voltage_uv);
+ if (ret) {
+ dev_err(dev, "ti,absolute-max-voltage-uv is missing\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ table = kzalloc(sizeof(*data->vdd_table) *
+ data->num_vdd_table, GFP_KERNEL);
+ if (!table) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ data->vdd_table = table;
+
+ val = prop->value;
+ for (i = 0; i < data->num_vdd_table; i++, table++) {
+ u32 efuse_offset;
+ u32 tmp;
+
+ table->reference_uv = be32_to_cpup(val++);
+ efuse_offset = be32_to_cpup(val++);
+
+ tmp = readl(base + efuse_offset);
+ tmp &= of_data->efuse_voltage_mask;
+ tmp >>= __ffs(of_data->efuse_voltage_mask);
+
+ table->optimized_uv = of_data->efuse_voltage_uv ? tmp :
+ tmp * 1000;
+
+ dev_dbg(dev, "[%d] efuse=0x%08x volt_table=%d vset=%d\n",
+ i, efuse_offset, table->reference_uv,
+ table->optimized_uv);
+
+ /*
+ * Some older samples might not have an optimized efuse value.
+ * Use the reference voltage for those - just log a debug
+ * message for them.
+ */
+ if (!table->optimized_uv) {
+ dev_dbg(dev, "[%d] efuse=0x%08x volt_table=%d:vset0\n",
+ i, efuse_offset, table->reference_uv);
+ table->optimized_uv = table->reference_uv;
+ }
+ }
+out:
+ iounmap(base);
+out_map:
+ return ret;
+}
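
As a worked example of the mask/shift decode in the loop above (illustrative values, not from a real efuse map):

#include <linux/bitops.h>
#include <linux/types.h>

/* Illustrative only: with efuse_voltage_mask = 0xFFF000 and a raw
 * register value of 0x0042A000, (raw & mask) >> __ffs(mask) is
 * 0x42A000 >> 12 == 0x42A == 1066; with efuse_voltage_uv == false
 * that is millivolts, so optimized_uv = 1066 * 1000 = 1066000 uV. */
static u32 sketch_decode_efuse(u32 raw, u32 mask, bool in_uv)
{
        u32 tmp = (raw & mask) >> __ffs(mask);

        return in_uv ? tmp : tmp * 1000;
}
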
+
+/**
+ * _free_optimized_voltages() - free resources for optvoltages
+ * @dev: device for which we need to free info
+ * @data: data specific to the device
+ */
+static void _free_optimized_voltages(struct device *dev,
+ struct ti_opp_supply_data *data)
+{
+ kfree(data->vdd_table);
+ data->vdd_table = NULL;
+ data->num_vdd_table = 0;
+}
+
+/**
+ * _get_optimal_vdd_voltage() - Finds optimal voltage for the supply
+ * @dev: device for which we need to find info
+ * @data: data specific to the device
+ * @reference_uv: reference voltage (OPP voltage) for which we need value
+ *
+ * Return: the optimized voltage if a match is found; otherwise return
+ * reference_uv (also the result when no optimization is needed).
+ */
+static int _get_optimal_vdd_voltage(struct device *dev,
+ struct ti_opp_supply_data *data,
+ int reference_uv)
+{
+ int i;
+ struct ti_opp_supply_optimum_voltage_table *table;
+
+ if (!data->num_vdd_table)
+ return reference_uv;
+
+ table = data->vdd_table;
+ if (!table)
+ return -EINVAL;
+
+ /* Find an exact match - this list is usually very small */
+ for (i = 0; i < data->num_vdd_table; i++, table++)
+ if (table->reference_uv == reference_uv)
+ return table->optimized_uv;
+
+ /* If things are screwed up, we'd make a mess on the console, so ratelimit */
+ dev_err_ratelimited(dev, "%s: Failed optimized voltage match for %d\n",
+ __func__, reference_uv);
+ return reference_uv;
+}
+
+static int _opp_set_voltage(struct device *dev,
+ struct dev_pm_opp_supply *supply,
+ int new_target_uv, struct regulator *reg,
+ char *reg_name)
+{
+ int ret;
+ unsigned long vdd_uv, uv_max;
+
+ if (new_target_uv)
+ vdd_uv = new_target_uv;
+ else
+ vdd_uv = supply->u_volt;
+
+ /*
+ * If we do have an absolute max voltage specified, then we should
+ * use that voltage instead to allow for cases where the voltage rails
+ * are ganged (for example, if we set the max for an OPP as 1.12 V while
+ * the absolute max is 1.5 V, another ganged rail cannot reach 1.25 V
+ * when this regulator is constrained to a max of 1.12 V, even though
+ * it can function at 1.25 V).
+ */
+ if (opp_data.vdd_absolute_max_voltage_uv)
+ uv_max = opp_data.vdd_absolute_max_voltage_uv;
+ else
+ uv_max = supply->u_volt_max;
+
+ if (vdd_uv > uv_max ||
+ vdd_uv < supply->u_volt_min ||
+ supply->u_volt_min > uv_max) {
+ dev_warn(dev,
+ "Invalid range voltages [Min:%lu target:%lu Max:%lu]\n",
+ supply->u_volt_min, vdd_uv, uv_max);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "%s scaling to %luuV[min %luuV max %luuV]\n", reg_name,
+ vdd_uv, supply->u_volt_min,
+ uv_max);
+
+ ret = regulator_set_voltage_triplet(reg,
+ supply->u_volt_min,
+ vdd_uv,
+ uv_max);
+ if (ret) {
+ dev_err(dev, "%s failed for %luuV[min %luuV max %luuV]\n",
+ reg_name, vdd_uv, supply->u_volt_min,
+ uv_max);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ti_opp_supply_set_opp() - do the opp supply transition
+ * @data: information on regulators and new and old opps provided by
+ * opp core to use in transition
+ *
+ * Return: If successful, 0, else appropriate error value.
+ */
+static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data)
+{
+ struct dev_pm_opp_supply *old_supply_vdd = &data->old_opp.supplies[0];
+ struct dev_pm_opp_supply *old_supply_vbb = &data->old_opp.supplies[1];
+ struct dev_pm_opp_supply *new_supply_vdd = &data->new_opp.supplies[0];
+ struct dev_pm_opp_supply *new_supply_vbb = &data->new_opp.supplies[1];
+ struct device *dev = data->dev;
+ unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
+ struct clk *clk = data->clk;
+ struct regulator *vdd_reg = data->regulators[0];
+ struct regulator *vbb_reg = data->regulators[1];
+ int vdd_uv;
+ int ret;
+
+ vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data,
+ new_supply_vbb->u_volt);
+
+ /* Scaling up? Scale voltage before frequency */
+ if (freq > old_freq) {
+ ret = _opp_set_voltage(dev, new_supply_vdd, vdd_uv, vdd_reg,
+ "vdd");
+ if (ret)
+ goto restore_voltage;
+
+ ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
+ if (ret)
+ goto restore_voltage;
+ }
+
+ /* Change frequency */
+ dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
+ __func__, old_freq, freq);
+
+ ret = clk_set_rate(clk, freq);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ goto restore_voltage;
+ }
+
+ /* Scaling down? Scale voltage after frequency */
+ if (freq < old_freq) {
+ ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
+ if (ret)
+ goto restore_freq;
+
+ ret = _opp_set_voltage(dev, new_supply_vdd, vdd_uv, vdd_reg,
+ "vdd");
+ if (ret)
+ goto restore_freq;
+ }
+
+ return 0;
+
+restore_freq:
+ ret = clk_set_rate(clk, old_freq);
+ if (ret)
+ dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+ __func__, old_freq);
+restore_voltage:
+ /* This shouldn't harm even if the voltages weren't updated earlier */
+ if (old_supply_vdd->u_volt) {
+ ret = _opp_set_voltage(dev, old_supply_vbb, 0, vbb_reg, "vbb");
+ if (ret)
+ return ret;
+
+ ret = _opp_set_voltage(dev, old_supply_vdd, 0, vdd_reg,
+ "vdd");
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static const struct ti_opp_supply_of_data omap_generic_of_data = {
+};
+
+static const struct ti_opp_supply_of_data omap_omap5_of_data = {
+ .flags = OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE,
+ .efuse_voltage_mask = 0xFFF,
+ .efuse_voltage_uv = false,
+};
+
+static const struct ti_opp_supply_of_data omap_omap5core_of_data = {
+ .flags = OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE | OPPDM_HAS_NO_ABB,
+ .efuse_voltage_mask = 0xFFF,
+ .efuse_voltage_uv = false,
+};
+
+static const struct of_device_id ti_opp_supply_of_match[] = {
+ {.compatible = "ti,omap-opp-supply", .data = &omap_generic_of_data},
+ {.compatible = "ti,omap5-opp-supply", .data = &omap_omap5_of_data},
+ {.compatible = "ti,omap5-core-opp-supply",
+ .data = &omap_omap5core_of_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ti_opp_supply_of_match);
+
+static int ti_opp_supply_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *cpu_dev = get_cpu_device(0);
+ const struct of_device_id *match;
+ const struct ti_opp_supply_of_data *of_data;
+ int ret = 0;
+
+ match = of_match_device(ti_opp_supply_of_match, dev);
+ if (!match) {
+ /* We do not expect this to happen */
+ dev_err(dev, "%s: Unable to match device\n", __func__);
+ return -ENODEV;
+ }
+ if (!match->data) {
+ /* Again unlikely, but mistakes do happen */
+ dev_err(dev, "%s: Bad data in match\n", __func__);
+ return -EINVAL;
+ }
+ of_data = match->data;
+
+ dev_set_drvdata(dev, (void *)of_data);
+
+ /* If we need optimized voltage */
+ if (of_data->flags & OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE) {
+ ret = _store_optimized_voltages(dev, &opp_data);
+ if (ret)
+ return ret;
+ }
+
+ ret = PTR_ERR_OR_ZERO(dev_pm_opp_register_set_opp_helper(cpu_dev,
+ ti_opp_supply_set_opp));
+ if (ret)
+ _free_optimized_voltages(dev, &opp_data);
+
+ return ret;
+}
+
+static struct platform_driver ti_opp_supply_driver = {
+ .probe = ti_opp_supply_probe,
+ .driver = {
+ .name = "ti_opp_supply",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ti_opp_supply_of_match),
+ },
+};
+module_platform_driver(ti_opp_supply_driver);
+
+MODULE_DESCRIPTION("Texas Instruments OMAP OPP Supply driver");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 0b3fb99..7390fb8 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -303,7 +303,7 @@ static void dino_mask_irq(struct irq_data *d)
struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
- DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);
+ DBG(KERN_WARNING "%s(0x%px, %d)\n", __func__, dino_dev, d->irq);
/* Clear the matching bit in the IMR register */
dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq));
@@ -316,7 +316,7 @@ static void dino_unmask_irq(struct irq_data *d)
int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
u32 tmp;
- DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);
+ DBG(KERN_WARNING "%s(0x%px, %d)\n", __func__, dino_dev, d->irq);
/*
** clear pending IRQ bits
@@ -396,7 +396,7 @@ ilr_again:
if (mask) {
if (--ilr_loop > 0)
goto ilr_again;
- printk(KERN_ERR "Dino 0x%p: stuck interrupt %d\n",
+ printk(KERN_ERR "Dino 0x%px: stuck interrupt %d\n",
dino_dev->hba.base_addr, mask);
return IRQ_NONE;
}
@@ -553,7 +553,7 @@ dino_fixup_bus(struct pci_bus *bus)
struct pci_dev *dev;
struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge));
- DBG(KERN_WARNING "%s(0x%p) bus %d platform_data 0x%p\n",
+ DBG(KERN_WARNING "%s(0x%px) bus %d platform_data 0x%px\n",
__func__, bus, bus->busn_res.start,
bus->bridge->platform_data);
@@ -854,7 +854,7 @@ static int __init dino_common_init(struct parisc_device *dev,
res->flags = IORESOURCE_IO; /* do not mark it busy ! */
if (request_resource(&ioport_resource, res) < 0) {
printk(KERN_ERR "%s: request I/O Port region failed "
- "0x%lx/%lx (hpa 0x%p)\n",
+ "0x%lx/%lx (hpa 0x%px)\n",
name, (unsigned long)res->start, (unsigned long)res->end,
dino_dev->hba.base_addr);
return 1;
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
index 4dd9b13..99a80da 100644
--- a/drivers/parisc/eisa_eeprom.c
+++ b/drivers/parisc/eisa_eeprom.c
@@ -106,7 +106,7 @@ static int __init eisa_eeprom_init(void)
return retval;
}
- printk(KERN_INFO "EISA EEPROM at 0x%p\n", eisa_eeprom_addr);
+ printk(KERN_INFO "EISA EEPROM at 0x%px\n", eisa_eeprom_addr);
return 0;
}
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index a25fed5..41b740a 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1692,3 +1692,36 @@ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
iounmap(base_addr);
}
+
+/*
+ * The design of the Diva management card in rp34x0 machines (rp3410, rp3440)
+ * seems rushed, so that many built-in components simply don't work.
+ * The following quirks disable the serial AUX port and the built-in ATI RV100
+ * Radeon 7000 graphics card, neither of which has any external connectors
+ * and thus both are useless. Even worse, the AUX port occupies ttyS0 and
+ * as such makes those machines the only PARISC machines on which we
+ * can't use ttyS0 as the boot console.
+ */
+static void quirk_diva_ati_card(struct pci_dev *dev)
+{
+ if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+ dev->subsystem_device != 0x1292)
+ return;
+
+ dev_info(&dev->dev, "Hiding Diva built-in ATI card");
+ dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY,
+ quirk_diva_ati_card);
+
+static void quirk_diva_aux_disable(struct pci_dev *dev)
+{
+ if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+ dev->subsystem_device != 0x1291)
+ return;
+
+ dev_info(&dev->dev, "Hiding Diva built-in AUX serial device");
+ dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
+ quirk_diva_aux_disable);
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 04dac6a..6b8d060 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -985,9 +985,7 @@ static u32 hv_compose_msi_req_v1(
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.vector_count = 1;
- int_pkt->int_desc.delivery_mode =
- (apic->irq_delivery_mode == dest_LowestPrio) ?
- dest_LowestPrio : dest_Fixed;
+ int_pkt->int_desc.delivery_mode = dest_Fixed;
/*
* Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
@@ -1008,9 +1006,7 @@ static u32 hv_compose_msi_req_v2(
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.vector_count = 1;
- int_pkt->int_desc.delivery_mode =
- (apic->irq_delivery_mode == dest_LowestPrio) ?
- dest_LowestPrio : dest_Fixed;
+ int_pkt->int_desc.delivery_mode = dest_Fixed;
/*
* Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 12796ec..52ab3cb 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -1128,12 +1128,12 @@ static int rcar_pcie_probe(struct platform_device *pdev)
err = rcar_pcie_get_resources(pcie);
if (err < 0) {
dev_err(dev, "failed to request resources: %d\n", err);
- goto err_free_bridge;
+ goto err_free_resource_list;
}
err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
if (err)
- goto err_free_bridge;
+ goto err_free_resource_list;
pm_runtime_enable(dev);
err = pm_runtime_get_sync(dev);
@@ -1176,9 +1176,9 @@ err_pm_put:
err_pm_disable:
pm_runtime_disable(dev);
-err_free_bridge:
- pci_free_host_bridge(bridge);
+err_free_resource_list:
pci_free_resource_list(&pcie->resources);
+ pci_free_host_bridge(bridge);
return err;
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 7f47bb7..5958c8d 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -699,7 +699,7 @@ static void pci_pm_complete(struct device *dev)
pm_generic_complete(dev);
/* Resume device if platform firmware has put it in reset-power-on */
- if (dev->power.direct_complete && pm_resume_via_firmware()) {
+ if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) {
pci_power_t pre_sleep_state = pci_dev->current_state;
pci_update_current_state(pci_dev, pci_dev->current_state);
@@ -783,8 +783,10 @@ static int pci_pm_suspend_noirq(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ dev->power.may_skip_resume = true;
return 0;
+ }
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
@@ -838,6 +840,16 @@ static int pci_pm_suspend_noirq(struct device *dev)
Fixup:
pci_fixup_device(pci_fixup_suspend_late, pci_dev);
+ /*
+ * If the target system sleep state is suspend-to-idle, it is sufficient
+ * to check whether or not the device's wakeup settings are good for
+ * runtime PM. Otherwise, the pm_resume_via_firmware() check will cause
+ * pci_pm_complete() to take care of fixing up the device's state
+ * anyway, if need be.
+ */
+ dev->power.may_skip_resume = device_may_wakeup(dev) ||
+ !device_can_wakeup(dev);
+
return 0;
}
@@ -847,6 +859,9 @@ static int pci_pm_resume_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
+ if (dev_pm_may_skip_resume(dev))
+ return 0;
+
/*
* Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
* during system suspend, so update their runtime PM status to "active"
@@ -953,7 +968,7 @@ static int pci_pm_freeze_late(struct device *dev)
if (dev_pm_smart_suspend_and_suspended(dev))
return 0;
- return pm_generic_freeze_late(dev);;
+ return pm_generic_freeze_late(dev);
}
static int pci_pm_freeze_noirq(struct device *dev)
@@ -999,7 +1014,7 @@ static int pci_pm_thaw_noirq(struct device *dev)
* the subsequent "thaw" callbacks for the device.
*/
if (dev_pm_smart_suspend_and_suspended(dev)) {
- dev->power.direct_complete = true;
+ dev_pm_skip_next_resume_phases(dev);
return 0;
}
@@ -1012,7 +1027,12 @@ static int pci_pm_thaw_noirq(struct device *dev)
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume_early(dev);
- pci_update_current_state(pci_dev, PCI_D0);
+ /*
+ * pci_restore_state() requires the device to be in D0 (because of MSI
+ * restoration among other things), so force it into D0 in case the
+ * driver's "freeze" callbacks put it into a low-power state directly.
+ */
+ pci_set_power_state(pci_dev, PCI_D0);
pci_restore_state(pci_dev);
if (drv && drv->pm && drv->pm->thaw_noirq)
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index ffbf4e7..fb1c1bb 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -150,6 +150,9 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
pci_save_state(dev);
+ dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_LEAVE_SUSPENDED);
+
if (pci_bridge_d3_possible(dev)) {
/*
* Keep the port resumed 100ms to make sure things like
diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
index accaaac..6601ad0 100644
--- a/drivers/phy/motorola/phy-cpcap-usb.c
+++ b/drivers/phy/motorola/phy-cpcap-usb.c
@@ -310,7 +310,7 @@ static int cpcap_usb_init_irq(struct platform_device *pdev,
int irq, error;
irq = platform_get_irq_byname(pdev, name);
- if (!irq)
+ if (irq < 0)
return -ENODEV;
error = devm_request_threaded_irq(ddata->dev, irq, NULL,
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index b4964b0..8f6e8e2 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -410,6 +410,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index)
if (ret)
return ERR_PTR(-ENODEV);
+ /* This phy type is handled by the usb-phy subsystem for now */
+ if (of_device_is_compatible(args.np, "usb-nop-xceiv"))
+ return ERR_PTR(-ENODEV);
+
mutex_lock(&phy_provider_mutex);
phy_provider = of_phy_provider_lookup(args.np);
if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
diff --git a/drivers/phy/renesas/Kconfig b/drivers/phy/renesas/Kconfig
index cb09245..c845fac 100644
--- a/drivers/phy/renesas/Kconfig
+++ b/drivers/phy/renesas/Kconfig
@@ -12,7 +12,9 @@ config PHY_RCAR_GEN3_USB2
tristate "Renesas R-Car generation 3 USB 2.0 PHY driver"
depends on ARCH_RENESAS
depends on EXTCON
+ depends on USB_SUPPORT
select GENERIC_PHY
+ select USB_COMMON
help
Support for USB 2.0 PHY found on Renesas R-Car generation 3 SoCs.
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index ee85fa0..7492c89 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -1137,6 +1137,7 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
if (IS_ERR(phy)) {
dev_err(dev, "failed to create phy: %s\n",
child_np->name);
+ pm_runtime_disable(dev);
return PTR_ERR(phy);
}
@@ -1146,6 +1147,7 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(phy_provider)) {
dev_err(dev, "Failed to register phy provider\n");
+ pm_runtime_disable(dev);
return PTR_ERR(phy_provider);
}
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 4307bf0..63e916d 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -75,14 +75,14 @@ MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match);
static struct device_node *
tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name)
{
- /*
- * of_find_node_by_name() drops a reference, so make sure to grab one.
- */
- struct device_node *np = of_node_get(padctl->dev->of_node);
+ struct device_node *pads, *np;
+
+ pads = of_get_child_by_name(padctl->dev->of_node, "pads");
+ if (!pads)
+ return NULL;
- np = of_find_node_by_name(np, "pads");
- if (np)
- np = of_find_node_by_name(np, name);
+ np = of_get_child_by_name(pads, name);
+ of_node_put(pads);
return np;
}
@@ -90,16 +90,16 @@ tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name)
static struct device_node *
tegra_xusb_pad_find_phy_node(struct tegra_xusb_pad *pad, unsigned int index)
{
- /*
- * of_find_node_by_name() drops a reference, so make sure to grab one.
- */
- struct device_node *np = of_node_get(pad->dev.of_node);
+ struct device_node *np, *lanes;
- np = of_find_node_by_name(np, "lanes");
- if (!np)
+ lanes = of_get_child_by_name(pad->dev.of_node, "lanes");
+ if (!lanes)
return NULL;
- return of_find_node_by_name(np, pad->soc->lanes[index].name);
+ np = of_get_child_by_name(lanes, pad->soc->lanes[index].name);
+ of_node_put(lanes);
+
+ return np;
}
static int
@@ -195,7 +195,7 @@ int tegra_xusb_pad_register(struct tegra_xusb_pad *pad,
unsigned int i;
int err;
- children = of_find_node_by_name(pad->dev.of_node, "lanes");
+ children = of_get_child_by_name(pad->dev.of_node, "lanes");
if (!children)
return -ENODEV;
@@ -444,21 +444,21 @@ static struct device_node *
tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type,
unsigned int index)
{
- /*
- * of_find_node_by_name() drops a reference, so make sure to grab one.
- */
- struct device_node *np = of_node_get(padctl->dev->of_node);
+ struct device_node *ports, *np;
+ char *name;
- np = of_find_node_by_name(np, "ports");
- if (np) {
- char *name;
+ ports = of_get_child_by_name(padctl->dev->of_node, "ports");
+ if (!ports)
+ return NULL;
- name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
- if (!name)
- return ERR_PTR(-ENOMEM);
- np = of_find_node_by_name(np, name);
- kfree(name);
+ name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
+ if (!name) {
+ of_node_put(ports);
+ return ERR_PTR(-ENOMEM);
}
+ np = of_get_child_by_name(ports, name);
+ kfree(name);
+ of_node_put(ports);
return np;
}
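
These conversions rely on the differing reference semantics of the two lookups: of_find_node_by_name() walks the whole tree from the node it is given and drops the reference on it, while of_get_child_by_name() searches only direct children and leaves the parent's refcount alone. A minimal sketch of the resulting pattern:

#include <linux/of.h>

/* Look up a grandchild node, balancing the intermediate reference. */
static struct device_node *sketch_find_grandchild(struct device_node *root,
                                                  const char *child,
                                                  const char *name)
{
        struct device_node *mid, *np;

        mid = of_get_child_by_name(root, child);   /* +1 ref on mid */
        if (!mid)
                return NULL;

        np = of_get_child_by_name(mid, name);      /* +1 ref on np */
        of_node_put(mid);                          /* drop intermediate */

        return np;      /* caller must of_node_put(np) when done */
}
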
@@ -847,7 +847,7 @@ static void tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl)
static int tegra_xusb_padctl_probe(struct platform_device *pdev)
{
- struct device_node *np = of_node_get(pdev->dev.of_node);
+ struct device_node *np = pdev->dev.of_node;
const struct tegra_xusb_padctl_soc *soc;
struct tegra_xusb_padctl *padctl;
const struct of_device_id *match;
@@ -855,7 +855,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
int err;
/* for backwards compatibility with old device trees */
- np = of_find_node_by_name(np, "pads");
+ np = of_get_child_by_name(np, "pads");
if (!np) {
dev_warn(&pdev->dev, "deprecated DT, using legacy driver\n");
return tegra_xusb_padctl_legacy_probe(pdev);
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index bdedb63..4471fd9 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1620,6 +1620,22 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
clear_bit(i, chip->irq.valid_mask);
}
+ /*
+ * The same set of machines in chv_no_valid_mask[] have incorrectly
+ * configured GPIOs that generate spurious interrupts, so we use
+ * this same list to apply another quirk for them.
+ *
+ * See also https://bugzilla.kernel.org/show_bug.cgi?id=197953.
+ */
+ if (!need_valid_mask) {
+ /*
+ * Mask all interrupts the community is able to generate
+ * but leave the ones that can only generate GPEs unmasked.
+ */
+ chv_writel(GENMASK(31, pctrl->community->nirqs),
+ pctrl->regs + CHV_INTMASK);
+ }
+
/* Clear all interrupts */
chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
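
A worked example of the GENMASK() write above, with an illustrative nirqs value:

#include <linux/bitops.h>
#include <linux/types.h>

/* With nirqs == 16 (illustrative), GENMASK(31, 16) == 0xFFFF0000:
 * bits 0-15 (lines that can raise interrupts) are written as 0, i.e.
 * masked, while bits 16-31 (GPE-only lines) stay set, i.e. unmasked. */
static u32 sketch_chv_intmask(unsigned int nirqs)
{
        return GENMASK(31, nirqs);
}
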
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index e6cd8de..3501491 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -222,6 +222,9 @@ static enum pin_config_param pcs_bias[] = {
*/
static struct lock_class_key pcs_lock_class;
+/* Class for the IRQ request mutex */
+static struct lock_class_key pcs_request_class;
+
/*
* REVISIT: Reads and writes could eventually use regmap or something
* generic. But at least on omaps, some mux registers are performance
@@ -1486,7 +1489,7 @@ static int pcs_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_set_chip_data(irq, pcs_soc);
irq_set_chip_and_handler(irq, &pcs->chip,
handle_level_irq);
- irq_set_lockdep_class(irq, &pcs_lock_class);
+ irq_set_lockdep_class(irq, &pcs_lock_class, &pcs_request_class);
irq_set_noprobe(irq);
return 0;
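
For context, irq_set_lockdep_class() now takes two keys: one for the irq_desc spinlock and one for the per-descriptor request mutex added in this kernel cycle, so drivers that set lockdep classes supply two static keys. A hedged sketch (hypothetical demo_* names):

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/lockdep.h>

static struct lock_class_key demo_lock_class;     /* for irq_desc->lock */
static struct lock_class_key demo_request_class;  /* for irq_desc->request_mutex */

static struct irq_chip demo_chip;                 /* hypothetical, details elided */

static int demo_irqdomain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hwirq)
{
        irq_set_chip_data(irq, d->host_data);
        irq_set_chip_and_handler(irq, &demo_chip, handle_level_irq);
        irq_set_lockdep_class(irq, &demo_lock_class, &demo_request_class);
        irq_set_noprobe(irq);

        return 0;
}
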
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index a276c61..e62ab08 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -290,7 +290,7 @@ static int stm32_gpio_domain_translate(struct irq_domain *d,
}
static int stm32_gpio_domain_activate(struct irq_domain *d,
- struct irq_data *irq_data, bool early)
+ struct irq_data *irq_data, bool reserve)
{
struct stm32_gpio_bank *bank = d->host_data;
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 0ad6e29..e728a96 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -38,14 +38,8 @@ config CHROMEOS_PSTORE
If you have a supported Chromebook, choose Y or M here.
The module will be called chromeos_pstore.
-config CROS_EC_CHARDEV
- tristate "Chrome OS Embedded Controller userspace device interface"
- depends on MFD_CROS_EC
- ---help---
- This driver adds support to talk with the ChromeOS EC from userspace.
-
- If you have a supported Chromebook, choose Y or M here.
- The module will be called cros_ec_dev.
+config CROS_EC_CTL
+ tristate
config CROS_EC_LPC
tristate "ChromeOS Embedded Controller (LPC)"
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index a077b1f..ff3b369 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -2,10 +2,9 @@
obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
-cros_ec_devs-objs := cros_ec_dev.o cros_ec_sysfs.o \
- cros_ec_lightbar.o cros_ec_vbc.o \
- cros_ec_debugfs.o
-obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_devs.o
+cros_ec_ctl-objs := cros_ec_sysfs.o cros_ec_lightbar.o \
+ cros_ec_vbc.o cros_ec_debugfs.o
+obj-$(CONFIG_CROS_EC_CTL) += cros_ec_ctl.o
cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_reg.o
cros_ec_lpcs-$(CONFIG_CROS_EC_LPC_MEC) += cros_ec_lpc_mec.o
obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 4cc66f4..98a35d3 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -29,9 +29,6 @@
#include <linux/slab.h>
#include <linux/wait.h>
-#include "cros_ec_dev.h"
-#include "cros_ec_debugfs.h"
-
#define LOG_SHIFT 14
#define LOG_SIZE (1 << LOG_SHIFT)
#define LOG_POLL_SEC 10
@@ -390,6 +387,7 @@ remove_debugfs:
debugfs_remove_recursive(debug_info->dir);
return ret;
}
+EXPORT_SYMBOL(cros_ec_debugfs_init);
void cros_ec_debugfs_remove(struct cros_ec_dev *ec)
{
@@ -399,3 +397,4 @@ void cros_ec_debugfs_remove(struct cros_ec_dev *ec)
debugfs_remove_recursive(ec->debug_info->dir);
cros_ec_cleanup_console_log(ec->debug_info);
}
+EXPORT_SYMBOL(cros_ec_debugfs_remove);
diff --git a/drivers/platform/chrome/cros_ec_debugfs.h b/drivers/platform/chrome/cros_ec_debugfs.h
deleted file mode 100644
index 1ff3a50..0000000
--- a/drivers/platform/chrome/cros_ec_debugfs.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright 2015 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _DRV_CROS_EC_DEBUGFS_H_
-#define _DRV_CROS_EC_DEBUGFS_H_
-
-#include "cros_ec_dev.h"
-
-/* debugfs stuff */
-int cros_ec_debugfs_init(struct cros_ec_dev *ec);
-void cros_ec_debugfs_remove(struct cros_ec_dev *ec);
-
-#endif /* _DRV_CROS_EC_DEBUGFS_H_ */
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index fd2b047..6ea79d4 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -33,8 +33,6 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
-#include "cros_ec_dev.h"
-
/* Rate-limit the lightbar interface to prevent DoS. */
static unsigned long lb_interval_jiffies = 50 * HZ / 1000;
@@ -414,6 +412,7 @@ error:
return ret;
}
+EXPORT_SYMBOL(lb_manual_suspend_ctrl);
int lb_suspend(struct cros_ec_dev *ec)
{
@@ -422,6 +421,7 @@ int lb_suspend(struct cros_ec_dev *ec)
return lb_send_empty_cmd(ec, LIGHTBAR_CMD_SUSPEND);
}
+EXPORT_SYMBOL(lb_suspend);
int lb_resume(struct cros_ec_dev *ec)
{
@@ -430,6 +430,7 @@ int lb_resume(struct cros_ec_dev *ec)
return lb_send_empty_cmd(ec, LIGHTBAR_CMD_RESUME);
}
+EXPORT_SYMBOL(lb_resume);
static ssize_t sequence_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -622,3 +623,4 @@ struct attribute_group cros_ec_lightbar_attr_group = {
.attrs = __lb_cmds_attrs,
.is_visible = cros_ec_lightbar_attrs_are_visible,
};
+EXPORT_SYMBOL(cros_ec_lightbar_attr_group);
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index f3baf99..d6eebe8 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -34,8 +34,6 @@
#include <linux/types.h>
#include <linux/uaccess.h>
-#include "cros_ec_dev.h"
-
/* Accessor functions */
static ssize_t show_ec_reboot(struct device *dev,
@@ -294,4 +292,7 @@ static struct attribute *__ec_attrs[] = {
struct attribute_group cros_ec_attr_group = {
.attrs = __ec_attrs,
};
+EXPORT_SYMBOL(cros_ec_attr_group);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS EC control driver");
diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c
index 564a0d0..6d38e6b 100644
--- a/drivers/platform/chrome/cros_ec_vbc.c
+++ b/drivers/platform/chrome/cros_ec_vbc.c
@@ -135,3 +135,4 @@ struct attribute_group cros_ec_vbc_attr_group = {
.bin_attrs = cros_ec_vbc_bin_attrs,
.is_bin_visible = cros_ec_vbc_is_visible,
};
+EXPORT_SYMBOL(cros_ec_vbc_attr_group);
diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c
index f379616..d4aeac3 100644
--- a/drivers/platform/x86/asus-wireless.c
+++ b/drivers/platform/x86/asus-wireless.c
@@ -118,6 +118,7 @@ static void asus_wireless_notify(struct acpi_device *adev, u32 event)
return;
}
input_report_key(data->idev, KEY_RFKILL, 1);
+ input_sync(data->idev);
input_report_key(data->idev, KEY_RFKILL, 0);
input_sync(data->idev);
}
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index bf897b1..cd4725e 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -37,6 +37,7 @@
struct quirk_entry {
u8 touchpad_led;
+ u8 kbd_led_levels_off_1;
int needs_kbd_timeouts;
/*
@@ -67,6 +68,10 @@ static struct quirk_entry quirk_dell_xps13_9333 = {
.kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
};
+static struct quirk_entry quirk_dell_latitude_e6410 = {
+ .kbd_led_levels_off_1 = 1,
+};
+
static struct platform_driver platform_driver = {
.driver = {
.name = "dell-laptop",
@@ -269,6 +274,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
},
.driver_data = &quirk_dell_xps13_9333,
},
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Latitude E6410",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6410"),
+ },
+ .driver_data = &quirk_dell_latitude_e6410,
+ },
{ }
};
@@ -1149,6 +1163,9 @@ static int kbd_get_info(struct kbd_info *info)
units = (buffer->output[2] >> 8) & 0xFF;
info->levels = (buffer->output[2] >> 16) & 0xFF;
+ if (quirks && quirks->kbd_led_levels_off_1 && info->levels)
+ info->levels--;
+
if (units & BIT(0))
info->seconds = (buffer->output[3] >> 0) & 0xFF;
if (units & BIT(1))
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 39d2f45..fb25b20 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -639,6 +639,8 @@ static int dell_wmi_events_set_enabled(bool enable)
int ret;
buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
buffer->cmd_class = CLASS_INFO;
buffer->cmd_select = SELECT_APP_REGISTRATION;
buffer->input[0] = 0x10000;
diff --git a/drivers/platform/x86/surfacepro3_button.c b/drivers/platform/x86/surfacepro3_button.c
index 6505c97..1b49169 100644
--- a/drivers/platform/x86/surfacepro3_button.c
+++ b/drivers/platform/x86/surfacepro3_button.c
@@ -119,7 +119,7 @@ static void surface_button_notify(struct acpi_device *device, u32 event)
if (key_code == KEY_RESERVED)
return;
if (pressed)
- pm_wakeup_event(&device->dev, 0);
+ pm_wakeup_dev_event(&device->dev, 0, button->suspended);
if (button->suspended)
return;
input_report_key(input, key_code, pressed?1:0);
@@ -185,6 +185,8 @@ static int surface_button_add(struct acpi_device *device)
error = input_register_device(input);
if (error)
goto err_free_input;
+
+ device_init_wakeup(&device->dev, true);
dev_info(&device->dev,
"%s [%s]\n", name, acpi_device_bid(device));
return 0;
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 791449a..daa68ac 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -1458,5 +1458,5 @@ static void __exit acpi_wmi_exit(void)
class_unregister(&wmi_bus_class);
}
-subsys_initcall(acpi_wmi_init);
+subsys_initcall_sync(acpi_wmi_init);
module_exit(acpi_wmi_exit);
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index e681140..077f334 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -581,10 +581,7 @@ static int __init pnpbios_thread_init(void)
init_completion(&unload_sem);
task = kthread_run(pnp_dock_thread, NULL, "kpnpbiosd");
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- return 0;
+ return PTR_ERR_OR_ZERO(task);
}
/* Start the kernel thread later: */
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index f054cdd..803666a 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -21,7 +21,6 @@
#include <linux/slab.h>
#include <linux/pnp.h>
#include <linux/io.h>
-#include <linux/kallsyms.h>
#include "base.h"
static void quirk_awe32_add_ports(struct pnp_dev *dev,
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
index 75f63e3..ed2b109 100644
--- a/drivers/power/avs/rockchip-io-domain.c
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -76,7 +76,7 @@ struct rockchip_iodomain_supply {
struct rockchip_iodomain {
struct device *dev;
struct regmap *grf;
- struct rockchip_iodomain_soc_data *soc_data;
+ const struct rockchip_iodomain_soc_data *soc_data;
struct rockchip_iodomain_supply supplies[MAX_SUPPLIES];
};
@@ -382,43 +382,43 @@ static const struct rockchip_iodomain_soc_data soc_data_rv1108_pmu = {
static const struct of_device_id rockchip_iodomain_match[] = {
{
.compatible = "rockchip,rk3188-io-voltage-domain",
- .data = (void *)&soc_data_rk3188
+ .data = &soc_data_rk3188
},
{
.compatible = "rockchip,rk3228-io-voltage-domain",
- .data = (void *)&soc_data_rk3228
+ .data = &soc_data_rk3228
},
{
.compatible = "rockchip,rk3288-io-voltage-domain",
- .data = (void *)&soc_data_rk3288
+ .data = &soc_data_rk3288
},
{
.compatible = "rockchip,rk3328-io-voltage-domain",
- .data = (void *)&soc_data_rk3328
+ .data = &soc_data_rk3328
},
{
.compatible = "rockchip,rk3368-io-voltage-domain",
- .data = (void *)&soc_data_rk3368
+ .data = &soc_data_rk3368
},
{
.compatible = "rockchip,rk3368-pmu-io-voltage-domain",
- .data = (void *)&soc_data_rk3368_pmu
+ .data = &soc_data_rk3368_pmu
},
{
.compatible = "rockchip,rk3399-io-voltage-domain",
- .data = (void *)&soc_data_rk3399
+ .data = &soc_data_rk3399
},
{
.compatible = "rockchip,rk3399-pmu-io-voltage-domain",
- .data = (void *)&soc_data_rk3399_pmu
+ .data = &soc_data_rk3399_pmu
},
{
.compatible = "rockchip,rv1108-io-voltage-domain",
- .data = (void *)&soc_data_rv1108
+ .data = &soc_data_rv1108
},
{
.compatible = "rockchip,rv1108-pmu-io-voltage-domain",
- .data = (void *)&soc_data_rv1108_pmu
+ .data = &soc_data_rv1108_pmu
},
{ /* sentinel */ },
};
@@ -443,7 +443,7 @@ static int rockchip_iodomain_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, iod);
match = of_match_node(rockchip_iodomain_match, np);
- iod->soc_data = (struct rockchip_iodomain_soc_data *)match->data;
+ iod->soc_data = match->data;
parent = pdev->dev.parent;
if (parent && parent->of_node) {
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index d1694f1..35636e1 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -29,6 +29,7 @@
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/powercap.h>
+#include <linux/suspend.h>
#include <asm/iosf_mbi.h>
#include <asm/processor.h>
@@ -155,6 +156,7 @@ struct rapl_power_limit {
int prim_id; /* primitive ID used to enable */
struct rapl_domain *domain;
const char *name;
+ u64 last_power_limit;
};
static const char pl1_name[] = "long_term";
@@ -1209,7 +1211,7 @@ static int rapl_package_register_powercap(struct rapl_package *rp)
struct rapl_domain *rd;
char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null */
struct powercap_zone *power_zone = NULL;
- int nr_pl, ret;;
+ int nr_pl, ret;
/* Update the domain data of the new package */
rapl_update_domain_data(rp);
@@ -1533,6 +1535,92 @@ static int rapl_cpu_down_prep(unsigned int cpu)
static enum cpuhp_state pcap_rapl_online;
+static void power_limit_state_save(void)
+{
+ struct rapl_package *rp;
+ struct rapl_domain *rd;
+ int nr_pl, ret, i;
+
+ get_online_cpus();
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ if (!rp->power_zone)
+ continue;
+ rd = power_zone_to_rapl_domain(rp->power_zone);
+ nr_pl = find_nr_power_limit(rd);
+ for (i = 0; i < nr_pl; i++) {
+ switch (rd->rpl[i].prim_id) {
+ case PL1_ENABLE:
+ ret = rapl_read_data_raw(rd,
+ POWER_LIMIT1,
+ true,
+ &rd->rpl[i].last_power_limit);
+ if (ret)
+ rd->rpl[i].last_power_limit = 0;
+ break;
+ case PL2_ENABLE:
+ ret = rapl_read_data_raw(rd,
+ POWER_LIMIT2,
+ true,
+ &rd->rpl[i].last_power_limit);
+ if (ret)
+ rd->rpl[i].last_power_limit = 0;
+ break;
+ }
+ }
+ }
+ put_online_cpus();
+}
+
+static void power_limit_state_restore(void)
+{
+ struct rapl_package *rp;
+ struct rapl_domain *rd;
+ int nr_pl, i;
+
+ get_online_cpus();
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ if (!rp->power_zone)
+ continue;
+ rd = power_zone_to_rapl_domain(rp->power_zone);
+ nr_pl = find_nr_power_limit(rd);
+ for (i = 0; i < nr_pl; i++) {
+ switch (rd->rpl[i].prim_id) {
+ case PL1_ENABLE:
+ if (rd->rpl[i].last_power_limit)
+ rapl_write_data_raw(rd,
+ POWER_LIMIT1,
+ rd->rpl[i].last_power_limit);
+ break;
+ case PL2_ENABLE:
+ if (rd->rpl[i].last_power_limit)
+ rapl_write_data_raw(rd,
+ POWER_LIMIT2,
+ rd->rpl[i].last_power_limit);
+ break;
+ }
+ }
+ }
+ put_online_cpus();
+}
+
+static int rapl_pm_callback(struct notifier_block *nb,
+ unsigned long mode, void *_unused)
+{
+ switch (mode) {
+ case PM_SUSPEND_PREPARE:
+ power_limit_state_save();
+ break;
+ case PM_POST_SUSPEND:
+ power_limit_state_restore();
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block rapl_pm_notifier = {
+ .notifier_call = rapl_pm_callback,
+};
+
static int __init rapl_init(void)
{
const struct x86_cpu_id *id;
@@ -1560,8 +1648,16 @@ static int __init rapl_init(void)
/* Don't bail out if PSys is not supported */
rapl_register_psys();
+
+ ret = register_pm_notifier(&rapl_pm_notifier);
+ if (ret)
+ goto err_unreg_all;
+
return 0;
+err_unreg_all:
+ cpuhp_remove_state(pcap_rapl_online);
+
err_unreg:
rapl_unregister_powercap();
return ret;
@@ -1569,6 +1665,7 @@ err_unreg:
static void __exit rapl_exit(void)
{
+ unregister_pm_notifier(&rapl_pm_notifier);
cpuhp_remove_state(pcap_rapl_online);
rapl_unregister_powercap();
}
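The new RAPL code follows the standard PM-notifier shape: save state on PM_SUSPEND_PREPARE, restore it on PM_POST_SUSPEND. Stripped to its skeleton (the callback body is left hypothetical):

    #include <linux/suspend.h>

    static int my_pm_cb(struct notifier_block *nb, unsigned long mode,
                        void *unused)
    {
            switch (mode) {
            case PM_SUSPEND_PREPARE:        /* snapshot hardware state */
                    break;
            case PM_POST_SUSPEND:           /* write the snapshot back */
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block my_pm_nb = { .notifier_call = my_pm_cb };
    /* register_pm_notifier(&my_pm_nb) on init,
     * unregister_pm_notifier(&my_pm_nb) on exit */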
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 5b10b50..64b2b25 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -673,15 +673,13 @@ EXPORT_SYMBOL_GPL(powercap_unregister_control_type);
static int __init powercap_init(void)
{
- int result = 0;
+ int result;
result = seed_constraint_attributes();
if (result)
return result;
- result = class_register(&powercap_class);
-
- return result;
+ return class_register(&powercap_class);
}
device_initcall(powercap_init);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 96cd55f..b27417c 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -744,6 +744,13 @@ config REGULATOR_S5M8767
via I2C bus. S5M8767A have 9 Bucks and 28 LDOs output and
supports DVS mode with 8bits of output voltage control.
+config REGULATOR_SC2731
+ tristate "Spreadtrum SC2731 power regulator driver"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ help
+ This driver provides support for the voltage regulators on the
+ SC2731 PMIC.
+
config REGULATOR_SKY81452
tristate "Skyworks Solutions SKY81452 voltage regulator"
depends on MFD_SKY81452
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 80ffc57..19fea09 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -95,6 +95,7 @@ obj-$(CONFIG_REGULATOR_RT5033) += rt5033-regulator.o
obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
+obj-$(CONFIG_REGULATOR_SC2731) += sc2731-regulator.o
obj-$(CONFIG_REGULATOR_SKY81452) += sky81452-regulator.o
obj-$(CONFIG_REGULATOR_STM32_VREFBUF) += stm32-vrefbuf.o
obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index b64b791..42681c1 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -58,8 +58,6 @@ static bool has_full_constraints;
static struct dentry *debugfs_root;
-static struct class regulator_class;
-
/*
* struct regulator_map
*
@@ -112,11 +110,6 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
const char *supply_name);
static void _regulator_put(struct regulator *regulator);
-static struct regulator_dev *dev_to_rdev(struct device *dev)
-{
- return container_of(dev, struct regulator_dev, dev);
-}
-
static const char *rdev_get_name(struct regulator_dev *rdev)
{
if (rdev->constraints && rdev->constraints->name)
@@ -236,26 +229,35 @@ static int regulator_check_voltage(struct regulator_dev *rdev,
return 0;
}
+/* return 0 if the state is valid */
+static int regulator_check_states(suspend_state_t state)
+{
+ return (state > PM_SUSPEND_MAX || state == PM_SUSPEND_TO_IDLE);
+}
+
/* Make sure we select a voltage that suits the needs of all
* regulator consumers
*/
static int regulator_check_consumers(struct regulator_dev *rdev,
- int *min_uV, int *max_uV)
+ int *min_uV, int *max_uV,
+ suspend_state_t state)
{
struct regulator *regulator;
+ struct regulator_voltage *voltage;
list_for_each_entry(regulator, &rdev->consumer_list, list) {
+ voltage = &regulator->voltage[state];
/*
* Assume consumers that didn't say anything are OK
* with anything in the constraint range.
*/
- if (!regulator->min_uV && !regulator->max_uV)
+ if (!voltage->min_uV && !voltage->max_uV)
continue;
- if (*max_uV > regulator->max_uV)
- *max_uV = regulator->max_uV;
- if (*min_uV < regulator->min_uV)
- *min_uV = regulator->min_uV;
+ if (*max_uV > voltage->max_uV)
+ *max_uV = voltage->max_uV;
+ if (*min_uV < voltage->min_uV)
+ *min_uV = voltage->min_uV;
}
if (*min_uV > *max_uV) {
@@ -324,6 +326,24 @@ static int regulator_mode_constrain(struct regulator_dev *rdev,
return -EINVAL;
}
+static inline struct regulator_state *
+regulator_get_suspend_state(struct regulator_dev *rdev, suspend_state_t state)
+{
+ if (rdev->constraints == NULL)
+ return NULL;
+
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ return &rdev->constraints->state_standby;
+ case PM_SUSPEND_MEM:
+ return &rdev->constraints->state_mem;
+ case PM_SUSPEND_MAX:
+ return &rdev->constraints->state_disk;
+ default:
+ return NULL;
+ }
+}
+
static ssize_t regulator_uV_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -731,29 +751,32 @@ static int drms_uA_update(struct regulator_dev *rdev)
}
static int suspend_set_state(struct regulator_dev *rdev,
- struct regulator_state *rstate)
+ suspend_state_t state)
{
int ret = 0;
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
/* If we have no suspend mode configuration don't set anything;
* only warn if the driver implements set_suspend_voltage or
* set_suspend_mode callback.
*/
- if (!rstate->enabled && !rstate->disabled) {
+ if (rstate->enabled != ENABLE_IN_SUSPEND &&
+ rstate->enabled != DISABLE_IN_SUSPEND) {
if (rdev->desc->ops->set_suspend_voltage ||
rdev->desc->ops->set_suspend_mode)
rdev_warn(rdev, "No configuration\n");
return 0;
}
- if (rstate->enabled && rstate->disabled) {
- rdev_err(rdev, "invalid configuration\n");
- return -EINVAL;
- }
-
- if (rstate->enabled && rdev->desc->ops->set_suspend_enable)
+ if (rstate->enabled == ENABLE_IN_SUSPEND &&
+ rdev->desc->ops->set_suspend_enable)
ret = rdev->desc->ops->set_suspend_enable(rdev);
- else if (rstate->disabled && rdev->desc->ops->set_suspend_disable)
+ else if (rstate->enabled == DISABLE_IN_SUSPEND &&
+ rdev->desc->ops->set_suspend_disable)
ret = rdev->desc->ops->set_suspend_disable(rdev);
else /* OK if set_suspend_enable or set_suspend_disable is NULL */
ret = 0;
@@ -778,28 +801,8 @@ static int suspend_set_state(struct regulator_dev *rdev,
return ret;
}
}
- return ret;
-}
-
-/* locks held by caller */
-static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
-{
- if (!rdev->constraints)
- return -EINVAL;
- switch (state) {
- case PM_SUSPEND_STANDBY:
- return suspend_set_state(rdev,
- &rdev->constraints->state_standby);
- case PM_SUSPEND_MEM:
- return suspend_set_state(rdev,
- &rdev->constraints->state_mem);
- case PM_SUSPEND_MAX:
- return suspend_set_state(rdev,
- &rdev->constraints->state_disk);
- default:
- return -EINVAL;
- }
+ return ret;
}
static void print_constraints(struct regulator_dev *rdev)
@@ -1068,7 +1071,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
/* do we need to setup our suspend state */
if (rdev->constraints->initial_state) {
- ret = suspend_prepare(rdev, rdev->constraints->initial_state);
+ ret = suspend_set_state(rdev, rdev->constraints->initial_state);
if (ret < 0) {
rdev_err(rdev, "failed to set suspend state\n");
return ret;
@@ -1356,9 +1359,9 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
debugfs_create_u32("uA_load", 0444, regulator->debugfs,
&regulator->uA_load);
debugfs_create_u32("min_uV", 0444, regulator->debugfs,
- &regulator->min_uV);
+ &regulator->voltage[PM_SUSPEND_ON].min_uV);
debugfs_create_u32("max_uV", 0444, regulator->debugfs,
- &regulator->max_uV);
+ &regulator->voltage[PM_SUSPEND_ON].max_uV);
debugfs_create_file("constraint_flags", 0444,
regulator->debugfs, regulator,
&constraint_flags_fops);
@@ -1417,20 +1420,6 @@ static void regulator_supply_alias(struct device **dev, const char **supply)
}
}
-static int of_node_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
-static struct regulator_dev *of_find_regulator_by_node(struct device_node *np)
-{
- struct device *dev;
-
- dev = class_find_device(&regulator_class, NULL, np, of_node_match);
-
- return dev ? dev_to_rdev(dev) : NULL;
-}
-
static int regulator_match(struct device *dev, const void *data)
{
struct regulator_dev *r = dev_to_rdev(dev);
@@ -2468,10 +2457,9 @@ static int _regulator_is_enabled(struct regulator_dev *rdev)
return rdev->desc->ops->is_enabled(rdev);
}
-static int _regulator_list_voltage(struct regulator *regulator,
- unsigned selector, int lock)
+static int _regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned selector, int lock)
{
- struct regulator_dev *rdev = regulator->rdev;
const struct regulator_ops *ops = rdev->desc->ops;
int ret;
@@ -2487,7 +2475,8 @@ static int _regulator_list_voltage(struct regulator *regulator,
if (lock)
mutex_unlock(&rdev->mutex);
} else if (rdev->is_switch && rdev->supply) {
- ret = _regulator_list_voltage(rdev->supply, selector, lock);
+ ret = _regulator_list_voltage(rdev->supply->rdev,
+ selector, lock);
} else {
return -EINVAL;
}
@@ -2563,7 +2552,7 @@ EXPORT_SYMBOL_GPL(regulator_count_voltages);
*/
int regulator_list_voltage(struct regulator *regulator, unsigned selector)
{
- return _regulator_list_voltage(regulator, selector, 1);
+ return _regulator_list_voltage(regulator->rdev, selector, 1);
}
EXPORT_SYMBOL_GPL(regulator_list_voltage);
@@ -2605,8 +2594,8 @@ int regulator_get_hardware_vsel_register(struct regulator *regulator,
if (ops->set_voltage_sel != regulator_set_voltage_sel_regmap)
return -EOPNOTSUPP;
- *vsel_reg = rdev->desc->vsel_reg;
- *vsel_mask = rdev->desc->vsel_mask;
+ *vsel_reg = rdev->desc->vsel_reg;
+ *vsel_mask = rdev->desc->vsel_mask;
return 0;
}
@@ -2897,10 +2886,38 @@ out:
return ret;
}
+static int _regulator_do_set_suspend_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV, suspend_state_t state)
+{
+ struct regulator_state *rstate;
+ int uV, sel;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
+
+ if (min_uV < rstate->min_uV)
+ min_uV = rstate->min_uV;
+ if (max_uV > rstate->max_uV)
+ max_uV = rstate->max_uV;
+
+ sel = regulator_map_voltage(rdev, min_uV, max_uV);
+ if (sel < 0)
+ return sel;
+
+ uV = rdev->desc->ops->list_voltage(rdev, sel);
+ if (uV >= min_uV && uV <= max_uV)
+ rstate->uV = uV;
+
+ return 0;
+}
+
static int regulator_set_voltage_unlocked(struct regulator *regulator,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ suspend_state_t state)
{
struct regulator_dev *rdev = regulator->rdev;
+ struct regulator_voltage *voltage = &regulator->voltage[state];
int ret = 0;
int old_min_uV, old_max_uV;
int current_uV;
@@ -2911,7 +2928,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
* should be a noop (some cpufreq implementations use the same
* voltage for multiple frequencies, for example).
*/
- if (regulator->min_uV == min_uV && regulator->max_uV == max_uV)
+ if (voltage->min_uV == min_uV && voltage->max_uV == max_uV)
goto out;
/* If we're trying to set a range that overlaps the current voltage,
@@ -2921,8 +2938,8 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
current_uV = _regulator_get_voltage(rdev);
if (min_uV <= current_uV && current_uV <= max_uV) {
- regulator->min_uV = min_uV;
- regulator->max_uV = max_uV;
+ voltage->min_uV = min_uV;
+ voltage->max_uV = max_uV;
goto out;
}
}
@@ -2940,12 +2957,12 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
goto out;
/* restore original values in case of error */
- old_min_uV = regulator->min_uV;
- old_max_uV = regulator->max_uV;
- regulator->min_uV = min_uV;
- regulator->max_uV = max_uV;
+ old_min_uV = voltage->min_uV;
+ old_max_uV = voltage->max_uV;
+ voltage->min_uV = min_uV;
+ voltage->max_uV = max_uV;
- ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
+ ret = regulator_check_consumers(rdev, &min_uV, &max_uV, state);
if (ret < 0)
goto out2;
@@ -2963,7 +2980,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
goto out2;
}
- best_supply_uV = _regulator_list_voltage(regulator, selector, 0);
+ best_supply_uV = _regulator_list_voltage(rdev, selector, 0);
if (best_supply_uV < 0) {
ret = best_supply_uV;
goto out2;
@@ -2982,7 +2999,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
if (supply_change_uV > 0) {
ret = regulator_set_voltage_unlocked(rdev->supply,
- best_supply_uV, INT_MAX);
+ best_supply_uV, INT_MAX, state);
if (ret) {
dev_err(&rdev->dev, "Failed to increase supply voltage: %d\n",
ret);
@@ -2990,13 +3007,17 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
}
}
- ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
+ if (state == PM_SUSPEND_ON)
+ ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
+ else
+ ret = _regulator_do_set_suspend_voltage(rdev, min_uV,
+ max_uV, state);
if (ret < 0)
goto out2;
if (supply_change_uV < 0) {
ret = regulator_set_voltage_unlocked(rdev->supply,
- best_supply_uV, INT_MAX);
+ best_supply_uV, INT_MAX, state);
if (ret)
dev_warn(&rdev->dev, "Failed to decrease supply voltage: %d\n",
ret);
@@ -3007,8 +3028,8 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
out:
return ret;
out2:
- regulator->min_uV = old_min_uV;
- regulator->max_uV = old_max_uV;
+ voltage->min_uV = old_min_uV;
+ voltage->max_uV = old_max_uV;
return ret;
}
@@ -3037,7 +3058,8 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
regulator_lock_supply(regulator->rdev);
- ret = regulator_set_voltage_unlocked(regulator, min_uV, max_uV);
+ ret = regulator_set_voltage_unlocked(regulator, min_uV, max_uV,
+ PM_SUSPEND_ON);
regulator_unlock_supply(regulator->rdev);
@@ -3045,6 +3067,89 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
}
EXPORT_SYMBOL_GPL(regulator_set_voltage);
+static inline int regulator_suspend_toggle(struct regulator_dev *rdev,
+ suspend_state_t state, bool en)
+{
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
+
+ if (!rstate->changeable)
+ return -EPERM;
+
+ rstate->enabled = en;
+
+ return 0;
+}
+
+int regulator_suspend_enable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ return regulator_suspend_toggle(rdev, state, true);
+}
+EXPORT_SYMBOL_GPL(regulator_suspend_enable);
+
+int regulator_suspend_disable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ struct regulator *regulator;
+ struct regulator_voltage *voltage;
+
+ /*
+ * If any consumer wants this regulator device to stay on in
+ * suspend states, don't mark it as disabled.
+ */
+ list_for_each_entry(regulator, &rdev->consumer_list, list) {
+ voltage = &regulator->voltage[state];
+ if (voltage->min_uV || voltage->max_uV)
+ return 0;
+ }
+
+ return regulator_suspend_toggle(rdev, state, false);
+}
+EXPORT_SYMBOL_GPL(regulator_suspend_disable);
+
+static int _regulator_set_suspend_voltage(struct regulator *regulator,
+ int min_uV, int max_uV,
+ suspend_state_t state)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
+
+ if (rstate->min_uV == rstate->max_uV) {
+ rdev_err(rdev, "The suspend voltage can't be changed!\n");
+ return -EPERM;
+ }
+
+ return regulator_set_voltage_unlocked(regulator, min_uV, max_uV, state);
+}
+
+int regulator_set_suspend_voltage(struct regulator *regulator, int min_uV,
+ int max_uV, suspend_state_t state)
+{
+ int ret = 0;
+
+ /* PM_SUSPEND_ON is handled by regulator_set_voltage() */
+ if (regulator_check_states(state) || state == PM_SUSPEND_ON)
+ return -EINVAL;
+
+ regulator_lock_supply(regulator->rdev);
+
+ ret = _regulator_set_suspend_voltage(regulator, min_uV,
+ max_uV, state);
+
+ regulator_unlock_supply(regulator->rdev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_set_suspend_voltage);
+
/**
* regulator_set_voltage_time - get raise/fall time
* @regulator: regulator source
@@ -3138,6 +3243,7 @@ EXPORT_SYMBOL_GPL(regulator_set_voltage_time_sel);
int regulator_sync_voltage(struct regulator *regulator)
{
struct regulator_dev *rdev = regulator->rdev;
+ struct regulator_voltage *voltage = &regulator->voltage[PM_SUSPEND_ON];
int ret, min_uV, max_uV;
mutex_lock(&rdev->mutex);
@@ -3149,20 +3255,20 @@ int regulator_sync_voltage(struct regulator *regulator)
}
/* This is only going to work if we've had a voltage configured. */
- if (!regulator->min_uV && !regulator->max_uV) {
+ if (!voltage->min_uV && !voltage->max_uV) {
ret = -EINVAL;
goto out;
}
- min_uV = regulator->min_uV;
- max_uV = regulator->max_uV;
+ min_uV = voltage->min_uV;
+ max_uV = voltage->max_uV;
/* This should be a paranoia check... */
ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
if (ret < 0)
goto out;
- ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
+ ret = regulator_check_consumers(rdev, &min_uV, &max_uV, 0);
if (ret < 0)
goto out;
@@ -3918,12 +4024,6 @@ static void regulator_dev_release(struct device *dev)
kfree(rdev);
}
-static struct class regulator_class = {
- .name = "regulator",
- .dev_release = regulator_dev_release,
- .dev_groups = regulator_dev_groups,
-};
-
static void rdev_init_debugfs(struct regulator_dev *rdev)
{
struct device *parent = rdev->dev.parent;
@@ -4174,81 +4274,86 @@ void regulator_unregister(struct regulator_dev *rdev)
}
EXPORT_SYMBOL_GPL(regulator_unregister);
-static int _regulator_suspend_prepare(struct device *dev, void *data)
+#ifdef CONFIG_SUSPEND
+static int _regulator_suspend_late(struct device *dev, void *data)
{
struct regulator_dev *rdev = dev_to_rdev(dev);
- const suspend_state_t *state = data;
+ suspend_state_t *state = data;
int ret;
mutex_lock(&rdev->mutex);
- ret = suspend_prepare(rdev, *state);
+ ret = suspend_set_state(rdev, *state);
mutex_unlock(&rdev->mutex);
return ret;
}
/**
- * regulator_suspend_prepare - prepare regulators for system wide suspend
+ * regulator_suspend_late - prepare regulators for system wide suspend
* @state: system suspend state
*
* Configure each regulator with its suspend operating parameters for the given state.
- * This will usually be called by machine suspend code prior to supending.
*/
-int regulator_suspend_prepare(suspend_state_t state)
+static int regulator_suspend_late(struct device *dev)
{
- /* ON is handled by regulator active state */
- if (state == PM_SUSPEND_ON)
- return -EINVAL;
+ suspend_state_t state = pm_suspend_target_state;
return class_for_each_device(&regulator_class, NULL, &state,
- _regulator_suspend_prepare);
+ _regulator_suspend_late);
}
-EXPORT_SYMBOL_GPL(regulator_suspend_prepare);
-
-static int _regulator_suspend_finish(struct device *dev, void *data)
+static int _regulator_resume_early(struct device *dev, void *data)
{
+ int ret = 0;
struct regulator_dev *rdev = dev_to_rdev(dev);
- int ret;
+ suspend_state_t *state = data;
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, *state);
+ if (rstate == NULL)
+ return -EINVAL;
mutex_lock(&rdev->mutex);
- if (rdev->use_count > 0 || rdev->constraints->always_on) {
- if (!_regulator_is_enabled(rdev)) {
- ret = _regulator_do_enable(rdev);
- if (ret)
- dev_err(dev,
- "Failed to resume regulator %d\n",
- ret);
- }
- } else {
- if (!have_full_constraints())
- goto unlock;
- if (!_regulator_is_enabled(rdev))
- goto unlock;
- ret = _regulator_do_disable(rdev);
- if (ret)
- dev_err(dev, "Failed to suspend regulator %d\n", ret);
- }
-unlock:
+ if (rdev->desc->ops->resume_early &&
+ (rstate->enabled == ENABLE_IN_SUSPEND ||
+ rstate->enabled == DISABLE_IN_SUSPEND))
+ ret = rdev->desc->ops->resume_early(rdev);
+
mutex_unlock(&rdev->mutex);
- /* Keep processing regulators in spite of any errors */
- return 0;
+ return ret;
}
-/**
- * regulator_suspend_finish - resume regulators from system wide suspend
- *
- * Turn on regulators that might be turned off by regulator_suspend_prepare
- * and that should be turned on according to the regulators properties.
- */
-int regulator_suspend_finish(void)
+static int regulator_resume_early(struct device *dev)
{
- return class_for_each_device(&regulator_class, NULL, NULL,
- _regulator_suspend_finish);
+ suspend_state_t state = pm_suspend_target_state;
+
+ return class_for_each_device(&regulator_class, NULL, &state,
+ _regulator_resume_early);
}
-EXPORT_SYMBOL_GPL(regulator_suspend_finish);
+#else /* !CONFIG_SUSPEND */
+
+#define regulator_suspend_late NULL
+#define regulator_resume_early NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops __maybe_unused regulator_pm_ops = {
+ .suspend_late = regulator_suspend_late,
+ .resume_early = regulator_resume_early,
+};
+#endif
+
+struct class regulator_class = {
+ .name = "regulator",
+ .dev_release = regulator_dev_release,
+ .dev_groups = regulator_dev_groups,
+#ifdef CONFIG_PM
+ .pm = &regulator_pm_ops,
+#endif
+};
/**
* regulator_has_full_constraints - the system has fully specified constraints
*
@@ -4424,8 +4529,8 @@ static void regulator_summary_show_subtree(struct seq_file *s,
switch (rdev->desc->type) {
case REGULATOR_VOLTAGE:
seq_printf(s, "%37dmV %5dmV",
- consumer->min_uV / 1000,
- consumer->max_uV / 1000);
+ consumer->voltage[PM_SUSPEND_ON].min_uV / 1000,
+ consumer->voltage[PM_SUSPEND_ON].max_uV / 1000);
break;
case REGULATOR_CURRENT:
break;
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index 66a8ea0..abfd56e 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -16,10 +16,25 @@
#ifndef __REGULATOR_INTERNAL_H
#define __REGULATOR_INTERNAL_H
+#include <linux/suspend.h>
+
+#define REGULATOR_STATES_NUM (PM_SUSPEND_MAX + 1)
+
+struct regulator_voltage {
+ int min_uV;
+ int max_uV;
+};
+
/*
* struct regulator
*
* One for each consumer device.
+ * @voltage - an array of voltage settings, one per system state, i.e.:
+ * PM_SUSPEND_ON
+ * PM_SUSPEND_TO_IDLE
+ * PM_SUSPEND_STANDBY
+ * PM_SUSPEND_MEM
+ * PM_SUSPEND_MAX
*/
struct regulator {
struct device *dev;
@@ -27,14 +42,22 @@ struct regulator {
unsigned int always_on:1;
unsigned int bypass:1;
int uA_load;
- int min_uV;
- int max_uV;
+ struct regulator_voltage voltage[REGULATOR_STATES_NUM];
const char *supply_name;
struct device_attribute dev_attr;
struct regulator_dev *rdev;
struct dentry *debugfs;
};
+extern struct class regulator_class;
+
+static inline struct regulator_dev *dev_to_rdev(struct device *dev)
+{
+ return container_of(dev, struct regulator_dev, dev);
+}
+
+struct regulator_dev *of_find_regulator_by_node(struct device_node *np);
+
#ifdef CONFIG_OF
struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
const struct regulator_desc *desc,
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 14637a0..092ed6e 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -177,14 +177,30 @@ static void of_get_regulation_constraints(struct device_node *np,
if (of_property_read_bool(suspend_np,
"regulator-on-in-suspend"))
- suspend_state->enabled = true;
+ suspend_state->enabled = ENABLE_IN_SUSPEND;
else if (of_property_read_bool(suspend_np,
"regulator-off-in-suspend"))
- suspend_state->disabled = true;
+ suspend_state->enabled = DISABLE_IN_SUSPEND;
+ else
+ suspend_state->enabled = DO_NOTHING_IN_SUSPEND;
+
+ if (!of_property_read_u32(np, "regulator-suspend-min-microvolt",
+ &pval))
+ suspend_state->min_uV = pval;
+
+ if (!of_property_read_u32(np, "regulator-suspend-max-microvolt",
+ &pval))
+ suspend_state->max_uV = pval;
if (!of_property_read_u32(suspend_np,
"regulator-suspend-microvolt", &pval))
suspend_state->uV = pval;
+ else /* otherwise use min_uV as default suspend voltage */
+ suspend_state->uV = suspend_state->min_uV;
+
+ if (of_property_read_bool(suspend_np,
+ "regulator-changeable-in-suspend"))
+ suspend_state->changeable = true;
if (i == PM_SUSPEND_MEM)
constraints->initial_state = PM_SUSPEND_MEM;
@@ -376,3 +392,17 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
return init_data;
}
+
+static int of_node_match(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+struct regulator_dev *of_find_regulator_by_node(struct device_node *np)
+{
+ struct device *dev;
+
+ dev = class_find_device(&regulator_class, NULL, np, of_node_match);
+
+ return dev ? dev_to_rdev(dev) : NULL;
+}
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 0241ada..63c7a0c 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -486,24 +486,6 @@ static int spmi_vreg_update_bits(struct spmi_regulator *vreg, u16 addr, u8 val,
return regmap_update_bits(vreg->regmap, vreg->base + addr, mask, val);
}
-static int spmi_regulator_common_is_enabled(struct regulator_dev *rdev)
-{
- struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
- u8 reg;
-
- spmi_vreg_read(vreg, SPMI_COMMON_REG_ENABLE, &reg, 1);
-
- return (reg & SPMI_COMMON_ENABLE_MASK) == SPMI_COMMON_ENABLE;
-}
-
-static int spmi_regulator_common_enable(struct regulator_dev *rdev)
-{
- struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
-
- return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_ENABLE,
- SPMI_COMMON_ENABLE, SPMI_COMMON_ENABLE_MASK);
-}
-
static int spmi_regulator_vs_enable(struct regulator_dev *rdev)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
@@ -513,7 +495,7 @@ static int spmi_regulator_vs_enable(struct regulator_dev *rdev)
vreg->vs_enable_time = ktime_get();
}
- return spmi_regulator_common_enable(rdev);
+ return regulator_enable_regmap(rdev);
}
static int spmi_regulator_vs_ocp(struct regulator_dev *rdev)
@@ -524,14 +506,6 @@ static int spmi_regulator_vs_ocp(struct regulator_dev *rdev)
return spmi_vreg_write(vreg, SPMI_VS_REG_OCP, &reg, 1);
}
-static int spmi_regulator_common_disable(struct regulator_dev *rdev)
-{
- struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
-
- return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_ENABLE,
- SPMI_COMMON_DISABLE, SPMI_COMMON_ENABLE_MASK);
-}
-
static int spmi_regulator_select_voltage(struct spmi_regulator *vreg,
int min_uV, int max_uV)
{
@@ -1062,9 +1036,9 @@ static irqreturn_t spmi_regulator_vs_ocp_isr(int irq, void *data)
}
static struct regulator_ops spmi_smps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_common_get_voltage,
@@ -1077,9 +1051,9 @@ static struct regulator_ops spmi_smps_ops = {
};
static struct regulator_ops spmi_ldo_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.get_voltage_sel = spmi_regulator_common_get_voltage,
.map_voltage = spmi_regulator_common_map_voltage,
@@ -1094,9 +1068,9 @@ static struct regulator_ops spmi_ldo_ops = {
};
static struct regulator_ops spmi_ln_ldo_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.get_voltage_sel = spmi_regulator_common_get_voltage,
.map_voltage = spmi_regulator_common_map_voltage,
@@ -1107,8 +1081,8 @@ static struct regulator_ops spmi_ln_ldo_ops = {
static struct regulator_ops spmi_vs_ops = {
.enable = spmi_regulator_vs_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_pull_down = spmi_regulator_common_set_pull_down,
.set_soft_start = spmi_regulator_common_set_soft_start,
.set_over_current_protection = spmi_regulator_vs_ocp,
@@ -1117,9 +1091,9 @@ static struct regulator_ops spmi_vs_ops = {
};
static struct regulator_ops spmi_boost_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_single_range_set_voltage,
.get_voltage_sel = spmi_regulator_single_range_get_voltage,
.map_voltage = spmi_regulator_single_map_voltage,
@@ -1128,9 +1102,9 @@ static struct regulator_ops spmi_boost_ops = {
};
static struct regulator_ops spmi_ftsmps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_common_get_voltage,
@@ -1143,9 +1117,9 @@ static struct regulator_ops spmi_ftsmps_ops = {
};
static struct regulator_ops spmi_ult_lo_smps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_ult_lo_smps_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_ult_lo_smps_get_voltage,
@@ -1157,9 +1131,9 @@ static struct regulator_ops spmi_ult_lo_smps_ops = {
};
static struct regulator_ops spmi_ult_ho_smps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_single_range_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_single_range_get_voltage,
@@ -1172,9 +1146,9 @@ static struct regulator_ops spmi_ult_ho_smps_ops = {
};
static struct regulator_ops spmi_ult_ldo_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_single_range_set_voltage,
.get_voltage_sel = spmi_regulator_single_range_get_voltage,
.map_voltage = spmi_regulator_single_map_voltage,
@@ -1711,6 +1685,9 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
vreg->desc.id = -1;
vreg->desc.owner = THIS_MODULE;
vreg->desc.type = REGULATOR_VOLTAGE;
+ vreg->desc.enable_reg = reg->base + SPMI_COMMON_REG_ENABLE;
+ vreg->desc.enable_mask = SPMI_COMMON_ENABLE_MASK;
+ vreg->desc.enable_val = SPMI_COMMON_ENABLE;
vreg->desc.name = name = reg->name;
vreg->desc.supply_name = reg->supply;
vreg->desc.of_match = reg->name;
@@ -1723,6 +1700,7 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
config.dev = dev;
config.driver_data = vreg;
+ config.regmap = regmap;
rdev = devm_regulator_register(dev, &vreg->desc, &config);
if (IS_ERR(rdev)) {
dev_err(dev, "failed to register %s\n", name);
diff --git a/drivers/regulator/sc2731-regulator.c b/drivers/regulator/sc2731-regulator.c
new file mode 100644
index 0000000..eb2bdf0
--- /dev/null
+++ b/drivers/regulator/sc2731-regulator.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+/*
+ * SC2731 regulator lock register
+ */
+#define SC2731_PWR_WR_PROT 0xf0c
+#define SC2731_WR_UNLOCK_VALUE 0x6e7f
+
+/*
+ * SC2731 enable register
+ */
+#define SC2731_POWER_PD_SW 0xc28
+#define SC2731_LDO_CAMA0_PD 0xcfc
+#define SC2731_LDO_CAMA1_PD 0xd04
+#define SC2731_LDO_CAMMOT_PD 0xd0c
+#define SC2731_LDO_VLDO_PD 0xd6c
+#define SC2731_LDO_EMMCCORE_PD 0xd2c
+#define SC2731_LDO_SDCORE_PD 0xd74
+#define SC2731_LDO_SDIO_PD 0xd70
+#define SC2731_LDO_WIFIPA_PD 0xd4c
+#define SC2731_LDO_USB33_PD 0xd5c
+#define SC2731_LDO_CAMD0_PD 0xd7c
+#define SC2731_LDO_CAMD1_PD 0xd84
+#define SC2731_LDO_CON_PD 0xd8c
+#define SC2731_LDO_CAMIO_PD 0xd94
+#define SC2731_LDO_SRAM_PD 0xd78
+
+/*
+ * SC2731 enable mask
+ */
+#define SC2731_DCDC_CPU0_PD_MASK BIT(4)
+#define SC2731_DCDC_CPU1_PD_MASK BIT(3)
+#define SC2731_DCDC_RF_PD_MASK BIT(11)
+#define SC2731_LDO_CAMA0_PD_MASK BIT(0)
+#define SC2731_LDO_CAMA1_PD_MASK BIT(0)
+#define SC2731_LDO_CAMMOT_PD_MASK BIT(0)
+#define SC2731_LDO_VLDO_PD_MASK BIT(0)
+#define SC2731_LDO_EMMCCORE_PD_MASK BIT(0)
+#define SC2731_LDO_SDCORE_PD_MASK BIT(0)
+#define SC2731_LDO_SDIO_PD_MASK BIT(0)
+#define SC2731_LDO_WIFIPA_PD_MASK BIT(0)
+#define SC2731_LDO_USB33_PD_MASK BIT(0)
+#define SC2731_LDO_CAMD0_PD_MASK BIT(0)
+#define SC2731_LDO_CAMD1_PD_MASK BIT(0)
+#define SC2731_LDO_CON_PD_MASK BIT(0)
+#define SC2731_LDO_CAMIO_PD_MASK BIT(0)
+#define SC2731_LDO_SRAM_PD_MASK BIT(0)
+
+/*
+ * SC2731 vsel register
+ */
+#define SC2731_DCDC_CPU0_VOL 0xc54
+#define SC2731_DCDC_CPU1_VOL 0xc64
+#define SC2731_DCDC_RF_VOL 0xcb8
+#define SC2731_LDO_CAMA0_VOL 0xd00
+#define SC2731_LDO_CAMA1_VOL 0xd08
+#define SC2731_LDO_CAMMOT_VOL 0xd10
+#define SC2731_LDO_VLDO_VOL 0xd28
+#define SC2731_LDO_EMMCCORE_VOL 0xd30
+#define SC2731_LDO_SDCORE_VOL 0xd38
+#define SC2731_LDO_SDIO_VOL 0xd40
+#define SC2731_LDO_WIFIPA_VOL 0xd50
+#define SC2731_LDO_USB33_VOL 0xd60
+#define SC2731_LDO_CAMD0_VOL 0xd80
+#define SC2731_LDO_CAMD1_VOL 0xd88
+#define SC2731_LDO_CON_VOL 0xd90
+#define SC2731_LDO_CAMIO_VOL 0xd98
+#define SC2731_LDO_SRAM_VOL 0xdb0
+
+/*
+ * SC2731 vsel register mask
+ */
+#define SC2731_DCDC_CPU0_VOL_MASK GENMASK(8, 0)
+#define SC2731_DCDC_CPU1_VOL_MASK GENMASK(8, 0)
+#define SC2731_DCDC_RF_VOL_MASK GENMASK(8, 0)
+#define SC2731_LDO_CAMA0_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_CAMA1_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_CAMMOT_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_VLDO_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_EMMCCORE_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_SDCORE_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_SDIO_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_WIFIPA_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_USB33_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_CAMD0_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_CAMD1_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_CON_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_CAMIO_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_SRAM_VOL_MASK GENMASK(6, 0)
+
+enum sc2731_regulator_id {
+ SC2731_BUCK_CPU0,
+ SC2731_BUCK_CPU1,
+ SC2731_BUCK_RF,
+ SC2731_LDO_CAMA0,
+ SC2731_LDO_CAMA1,
+ SC2731_LDO_CAMMOT,
+ SC2731_LDO_VLDO,
+ SC2731_LDO_EMMCCORE,
+ SC2731_LDO_SDCORE,
+ SC2731_LDO_SDIO,
+ SC2731_LDO_WIFIPA,
+ SC2731_LDO_USB33,
+ SC2731_LDO_CAMD0,
+ SC2731_LDO_CAMD1,
+ SC2731_LDO_CON,
+ SC2731_LDO_CAMIO,
+ SC2731_LDO_SRAM,
+};
+
+static const struct regulator_ops sc2731_regu_linear_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+#define SC2731_REGU_LINEAR(_id, en_reg, en_mask, vreg, vmask, \
+ vstep, vmin, vmax) { \
+ .name = #_id, \
+ .of_match = of_match_ptr(#_id), \
+ .ops = &sc2731_regu_linear_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = SC2731_##_id, \
+ .owner = THIS_MODULE, \
+ .min_uV = vmin, \
+ .n_voltages = ((vmax) - (vmin)) / (vstep) + 1, \
+ .uV_step = vstep, \
+ .enable_is_inverted = true, \
+ .enable_val = 0, \
+ .enable_reg = en_reg, \
+ .enable_mask = en_mask, \
+ .vsel_reg = vreg, \
+ .vsel_mask = vmask, \
+}
+
+static struct regulator_desc regulators[] = {
+ SC2731_REGU_LINEAR(BUCK_CPU0, SC2731_POWER_PD_SW,
+ SC2731_DCDC_CPU0_PD_MASK, SC2731_DCDC_CPU0_VOL,
+ SC2731_DCDC_CPU0_VOL_MASK, 3125, 400000, 1996875),
+ SC2731_REGU_LINEAR(BUCK_CPU1, SC2731_POWER_PD_SW,
+ SC2731_DCDC_CPU1_PD_MASK, SC2731_DCDC_CPU1_VOL,
+ SC2731_DCDC_CPU1_VOL_MASK, 3125, 400000, 1996875),
+ SC2731_REGU_LINEAR(BUCK_RF, SC2731_POWER_PD_SW, SC2731_DCDC_RF_PD_MASK,
+ SC2731_DCDC_RF_VOL, SC2731_DCDC_RF_VOL_MASK,
+ 3125, 600000, 2196875),
+ SC2731_REGU_LINEAR(LDO_CAMA0, SC2731_LDO_CAMA0_PD,
+ SC2731_LDO_CAMA0_PD_MASK, SC2731_LDO_CAMA0_VOL,
+ SC2731_LDO_CAMA0_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_CAMA1, SC2731_LDO_CAMA1_PD,
+ SC2731_LDO_CAMA1_PD_MASK, SC2731_LDO_CAMA1_VOL,
+ SC2731_LDO_CAMA1_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_CAMMOT, SC2731_LDO_CAMMOT_PD,
+ SC2731_LDO_CAMMOT_PD_MASK, SC2731_LDO_CAMMOT_VOL,
+ SC2731_LDO_CAMMOT_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_VLDO, SC2731_LDO_VLDO_PD,
+ SC2731_LDO_VLDO_PD_MASK, SC2731_LDO_VLDO_VOL,
+ SC2731_LDO_VLDO_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_EMMCCORE, SC2731_LDO_EMMCCORE_PD,
+ SC2731_LDO_EMMCCORE_PD_MASK, SC2731_LDO_EMMCCORE_VOL,
+ SC2731_LDO_EMMCCORE_VOL_MASK, 10000, 1200000,
+ 3750000),
+ SC2731_REGU_LINEAR(LDO_SDCORE, SC2731_LDO_SDCORE_PD,
+ SC2731_LDO_SDCORE_PD_MASK, SC2731_LDO_SDCORE_VOL,
+ SC2731_LDO_SDCORE_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_SDIO, SC2731_LDO_SDIO_PD,
+ SC2731_LDO_SDIO_PD_MASK, SC2731_LDO_SDIO_VOL,
+ SC2731_LDO_SDIO_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_WIFIPA, SC2731_LDO_WIFIPA_PD,
+ SC2731_LDO_WIFIPA_PD_MASK, SC2731_LDO_WIFIPA_VOL,
+ SC2731_LDO_WIFIPA_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_USB33, SC2731_LDO_USB33_PD,
+ SC2731_LDO_USB33_PD_MASK, SC2731_LDO_USB33_VOL,
+ SC2731_LDO_USB33_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_CAMD0, SC2731_LDO_CAMD0_PD,
+ SC2731_LDO_CAMD0_PD_MASK, SC2731_LDO_CAMD0_VOL,
+ SC2731_LDO_CAMD0_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_CAMD1, SC2731_LDO_CAMD1_PD,
+ SC2731_LDO_CAMD1_PD_MASK, SC2731_LDO_CAMD1_VOL,
+ SC2731_LDO_CAMD1_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_CON, SC2731_LDO_CON_PD,
+ SC2731_LDO_CON_PD_MASK, SC2731_LDO_CON_VOL,
+ SC2731_LDO_CON_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_CAMIO, SC2731_LDO_CAMIO_PD,
+ SC2731_LDO_CAMIO_PD_MASK, SC2731_LDO_CAMIO_VOL,
+ SC2731_LDO_CAMIO_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_SRAM, SC2731_LDO_SRAM_PD,
+ SC2731_LDO_SRAM_PD_MASK, SC2731_LDO_SRAM_VOL,
+ SC2731_LDO_SRAM_VOL_MASK, 6250, 1000000, 1793750),
+};
+
+static int sc2731_regulator_unlock(struct regmap *regmap)
+{
+ return regmap_write(regmap, SC2731_PWR_WR_PROT,
+ SC2731_WR_UNLOCK_VALUE);
+}
+
+static int sc2731_regulator_probe(struct platform_device *pdev)
+{
+ int i, ret;
+ struct regmap *regmap;
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap) {
+ dev_err(&pdev->dev, "failed to get regmap.\n");
+ return -ENODEV;
+ }
+
+ ret = sc2731_regulator_unlock(regmap);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to release regulator lock\n");
+ return ret;
+ }
+
+ config.dev = &pdev->dev;
+ config.regmap = regmap;
+
+ for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ rdev = devm_regulator_register(&pdev->dev, &regulators[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ regulators[i].name);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver sc2731_regulator_driver = {
+ .driver = {
+ .name = "sc27xx-regulator",
+ },
+ .probe = sc2731_regulator_probe,
+};
+
+module_platform_driver(sc2731_regulator_driver);
+
+MODULE_AUTHOR("Chen Junhui <erick.chen@spreadtrum.com>");
+MODULE_DESCRIPTION("Spreadtrum SC2731 regulator driver");
+MODULE_LICENSE("GPL v2");
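Every SC2731 regulator above sets enable_is_inverted with enable_val = 0, i.e. a set _PD (power-down) bit means the rail is off. The generic regmap helper honours that roughly as follows (a paraphrase of the core helper for this driver's settings, not verbatim):

    static int example_is_enabled(struct regulator_dev *rdev)
    {
            unsigned int val;
            int ret;

            ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
            if (ret)
                    return ret;
            val &= rdev->desc->enable_mask;
            /* inverted + enable_val == 0: enabled iff the PD bit is clear */
            return rdev->desc->enable_is_inverted ? val == 0 : val != 0;
    }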
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index bc48995..1827185 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -28,9 +28,6 @@
#include <linux/regulator/machine.h>
#include <linux/mfd/tps65218.h>
-enum tps65218_regulators { DCDC1, DCDC2, DCDC3, DCDC4,
- DCDC5, DCDC6, LDO1, LS3 };
-
#define TPS65218_REGULATOR(_name, _of, _id, _type, _ops, _n, _vr, _vm, _er, \
_em, _cr, _cm, _lr, _nlr, _delay, _fuv, _sr, _sm) \
{ \
@@ -329,6 +326,8 @@ static int tps65218_regulator_probe(struct platform_device *pdev)
/* Allocate memory for strobes */
tps->strobes = devm_kzalloc(&pdev->dev, sizeof(u8) *
TPS65218_NUM_REGULATOR, GFP_KERNEL);
+ if (!tps->strobes)
+ return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(regulators); i++) {
rdev = devm_regulator_register(&pdev->dev, &regulators[i],
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index c94b606..ee14d8e 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2803,6 +2803,16 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
erp = dasd_3990_erp_handle_match_erp(cqr, erp);
}
+
+ /*
+ * For path verification work we need to stick with the path that was
+ * originally chosen so that the per path configuration data is
+ * assigned correctly.
+ */
+ if (test_bit(DASD_CQR_VERIFY_PATH, &erp->flags) && cqr->lpm) {
+ erp->lpm = cqr->lpm;
+ }
+
if (device->features & DASD_FEATURE_ERPLOG) {
/* print current erp_chain */
dev_err(&device->cdev->dev,
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 05ac6ba..614b44e 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -17,6 +17,8 @@ CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
CFLAGS_sclp_early_core.o += -march=z900
endif
+CFLAGS_sclp_early_core.o += -D__NO_FORTIFY
+
obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
sclp_early.o sclp_early_core.o
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 15015a2..badf42a 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -565,9 +565,9 @@ enum qeth_cq {
};
struct qeth_ipato {
- int enabled;
- int invert4;
- int invert6;
+ bool enabled;
+ bool invert4;
+ bool invert6;
struct list_head entries;
};
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 430e321..3614df6 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1480,9 +1480,9 @@ static int qeth_setup_card(struct qeth_card *card)
qeth_set_intial_options(card);
/* IP address takeover */
INIT_LIST_HEAD(&card->ipato.entries);
- card->ipato.enabled = 0;
- card->ipato.invert4 = 0;
- card->ipato.invert6 = 0;
+ card->ipato.enabled = false;
+ card->ipato.invert4 = false;
+ card->ipato.invert6 = false;
/* init QDIO stuff */
qeth_init_qdio_info(card);
INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
@@ -5386,6 +5386,13 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_poll);
+static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
+{
+ if (!cmd->hdr.return_code)
+ cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
+ return cmd->hdr.return_code;
+}
+
int qeth_setassparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
@@ -6242,7 +6249,7 @@ static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
(struct qeth_checksum_cmd *)reply->param;
QETH_CARD_TEXT(card, 4, "chkdoccb");
- if (cmd->hdr.return_code)
+ if (qeth_setassparms_inspect_rc(cmd))
return 0;
memset(chksum_cb, 0, sizeof(*chksum_cb));
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 194ae9b..e583383 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -82,7 +82,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
const u8 *);
-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
+void qeth_l3_update_ipato(struct qeth_card *card);
struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 6a73894..ef0961e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -164,8 +164,8 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
}
}
-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
- struct qeth_ipaddr *addr)
+static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
+ struct qeth_ipaddr *addr)
{
struct qeth_ipato_entry *ipatoe;
u8 addr_bits[128] = {0, };
@@ -174,6 +174,8 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
if (!card->ipato.enabled)
return 0;
+ if (addr->type != QETH_IP_TYPE_NORMAL)
+ return 0;
qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
(addr->proto == QETH_PROT_IPV4)? 4:16);
@@ -290,8 +292,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
addr->ref_counter = 1;
- if (addr->type == QETH_IP_TYPE_NORMAL &&
- qeth_l3_is_addr_covered_by_ipato(card, addr)) {
+ if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
QETH_CARD_TEXT(card, 2, "tkovaddr");
addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
}
@@ -605,6 +606,27 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
/*
* IP address takeover related functions
*/
+
+/**
+ * qeth_l3_update_ipato() - Update the 'takeover' property for all NORMAL IPs.
+ *
+ * Caller must hold ip_lock.
+ */
+void qeth_l3_update_ipato(struct qeth_card *card)
+{
+ struct qeth_ipaddr *addr;
+ unsigned int i;
+
+ hash_for_each(card->ip_htable, i, addr, hnode) {
+ if (addr->type != QETH_IP_TYPE_NORMAL)
+ continue;
+ if (qeth_l3_is_addr_covered_by_ipato(card, addr))
+ addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
+ else
+ addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG;
+ }
+}
+
static void qeth_l3_clear_ipato_list(struct qeth_card *card)
{
struct qeth_ipato_entry *ipatoe, *tmp;
@@ -616,6 +638,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card)
kfree(ipatoe);
}
+ qeth_l3_update_ipato(card);
spin_unlock_bh(&card->ip_lock);
}
@@ -640,8 +663,10 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
}
}
- if (!rc)
+ if (!rc) {
list_add_tail(&new->entry, &card->ipato.entries);
+ qeth_l3_update_ipato(card);
+ }
spin_unlock_bh(&card->ip_lock);
@@ -664,6 +689,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
(proto == QETH_PROT_IPV4)? 4:16) &&
(ipatoe->mask_bits == mask_bits)) {
list_del(&ipatoe->entry);
+ qeth_l3_update_ipato(card);
kfree(ipatoe);
}
}
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index bd12fdf..6ea2b52 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -370,8 +370,8 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- struct qeth_ipaddr *addr;
- int i, rc = 0;
+ bool enable;
+ int rc = 0;
if (!card)
return -EINVAL;
@@ -384,25 +384,18 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
}
if (sysfs_streq(buf, "toggle")) {
- card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
- } else if (sysfs_streq(buf, "1")) {
- card->ipato.enabled = 1;
- hash_for_each(card->ip_htable, i, addr, hnode) {
- if ((addr->type == QETH_IP_TYPE_NORMAL) &&
- qeth_l3_is_addr_covered_by_ipato(card, addr))
- addr->set_flags |=
- QETH_IPA_SETIP_TAKEOVER_FLAG;
- }
- } else if (sysfs_streq(buf, "0")) {
- card->ipato.enabled = 0;
- hash_for_each(card->ip_htable, i, addr, hnode) {
- if (addr->set_flags &
- QETH_IPA_SETIP_TAKEOVER_FLAG)
- addr->set_flags &=
- ~QETH_IPA_SETIP_TAKEOVER_FLAG;
- }
- } else
+ enable = !card->ipato.enabled;
+ } else if (kstrtobool(buf, &enable)) {
rc = -EINVAL;
+ goto out;
+ }
+
+ if (card->ipato.enabled != enable) {
+ card->ipato.enabled = enable;
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_update_ipato(card);
+ spin_unlock_bh(&card->ip_lock);
+ }
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
@@ -428,20 +421,27 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
+ bool invert;
int rc = 0;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if (sysfs_streq(buf, "toggle"))
- card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
- else if (sysfs_streq(buf, "1"))
- card->ipato.invert4 = 1;
- else if (sysfs_streq(buf, "0"))
- card->ipato.invert4 = 0;
- else
+ if (sysfs_streq(buf, "toggle")) {
+ invert = !card->ipato.invert4;
+ } else if (kstrtobool(buf, &invert)) {
rc = -EINVAL;
+ goto out;
+ }
+
+ if (card->ipato.invert4 != invert) {
+ card->ipato.invert4 = invert;
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_update_ipato(card);
+ spin_unlock_bh(&card->ip_lock);
+ }
+out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
@@ -607,20 +607,27 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
+ bool invert;
int rc = 0;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if (sysfs_streq(buf, "toggle"))
- card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
- else if (sysfs_streq(buf, "1"))
- card->ipato.invert6 = 1;
- else if (sysfs_streq(buf, "0"))
- card->ipato.invert6 = 0;
- else
+ if (sysfs_streq(buf, "toggle")) {
+ invert = !card->ipato.invert6;
+ } else if (kstrtobool(buf, &invert)) {
rc = -EINVAL;
+ goto out;
+ }
+
+ if (card->ipato.invert6 != invert) {
+ card->ipato.invert6 = invert;
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_update_ipato(card);
+ spin_unlock_bh(&card->ip_lock);
+ }
+out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
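All three stores above converge on the same kstrtobool-plus-"toggle" idiom, re-evaluating the IPATO flags under ip_lock only when the value actually changes. The bare pattern (my_flag is a hypothetical driver field):

    static bool my_flag;

    static ssize_t my_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
    {
            bool val;

            if (sysfs_streq(buf, "toggle"))
                    val = !my_flag;
            else if (kstrtobool(buf, &val))
                    return -EINVAL;

            if (my_flag != val)
                    my_flag = val;  /* plus any locked re-evaluation */
            return count;
    }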
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 6e3d819..d522654 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1725,6 +1725,7 @@ struct aac_dev
#define FIB_CONTEXT_FLAG_NATIVE_HBA (0x00000010)
#define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF (0x00000020)
#define FIB_CONTEXT_FLAG_SCSI_CMD (0x00000040)
+#define FIB_CONTEXT_FLAG_EH_RESET (0x00000080)
/*
* Define the command values
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index bec9f31..80a8cb2 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2482,8 +2482,8 @@ int aac_command_thread(void *data)
/* Synchronize our watches */
if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
&& (now.tv_nsec > (NSEC_PER_SEC / HZ)))
- difference = (((NSEC_PER_SEC - now.tv_nsec) * HZ)
- + NSEC_PER_SEC / 2) / NSEC_PER_SEC;
+ difference = HZ + HZ / 2 -
+ now.tv_nsec / (NSEC_PER_SEC / HZ);
else {
if (now.tv_nsec > NSEC_PER_SEC / 2)
++now.tv_sec;
@@ -2507,6 +2507,10 @@ int aac_command_thread(void *data)
if (kthread_should_stop())
break;
+ /*
+ * we probably want usleep_range() here instead of the
+ * jiffies computation
+ */
schedule_timeout(difference);
if (kthread_should_stop())
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index bdf127a..d55332d 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1037,7 +1037,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
info = &aac->hba_map[bus][cid];
if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
- fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
+ fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
}
}
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 72ca2a2..b2fa195 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3135,7 +3135,8 @@ bfad_im_bsg_vendor_request(struct bsg_job *job)
struct fc_bsg_request *bsg_request = job->request;
struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
- struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
struct bfad_s *bfad = im_port->bfad;
void *payload_kbuf;
int rc = -EINVAL;
@@ -3350,7 +3351,8 @@ int
bfad_im_bsg_els_ct_request(struct bsg_job *job)
{
struct bfa_bsg_data *bsg_data;
- struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
struct bfad_s *bfad = im_port->bfad;
bfa_bsg_fcpt_t *bsg_fcpt;
struct bfad_fcxp *drv_fcxp;
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 24e657a..c05d6e9 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -546,6 +546,7 @@ int
bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
struct device *dev)
{
+ struct bfad_im_port_pointer *im_portp;
int error = 1;
mutex_lock(&bfad_mutex);
@@ -564,7 +565,8 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
goto out_free_idr;
}
- im_port->shost->hostdata[0] = (unsigned long)im_port;
+ im_portp = shost_priv(im_port->shost);
+ im_portp->p = im_port;
im_port->shost->unique_id = im_port->idr_id;
im_port->shost->this_id = -1;
im_port->shost->max_id = MAX_FCP_TARGET;
@@ -748,7 +750,7 @@ bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
sht->sg_tablesize = bfad->cfg_data.io_max_sge;
- return scsi_host_alloc(sht, sizeof(unsigned long));
+ return scsi_host_alloc(sht, sizeof(struct bfad_im_port_pointer));
}
void
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index c81ec2a..06ce4ba 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -69,6 +69,16 @@ struct bfad_im_port_s {
struct fc_vport *fc_vport;
};
+struct bfad_im_port_pointer {
+ struct bfad_im_port_s *p;
+};
+
+static inline struct bfad_im_port_s *bfad_get_im_port(struct Scsi_Host *host)
+{
+ struct bfad_im_port_pointer *im_portp = shost_priv(host);
+ return im_portp->p;
+}
+
enum bfad_itnim_state {
ITNIM_STATE_NONE,
ITNIM_STATE_ONLINE,
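The bfa change above stops storing a pointer in shost->hostdata[0] by hand and instead sizes the Scsi_Host private area for a small wrapper struct, retrieved type-safely via shost_priv(). The allocation/lookup pairing in isolation (driver names hypothetical; a sketch rather than the exact bfa code):

	struct demo_port;			/* driver-private port object */

	struct demo_port_pointer {
		struct demo_port *p;
	};

	/* allocation: reserve room for the wrapper behind the Scsi_Host */
	shost = scsi_host_alloc(&demo_sht, sizeof(struct demo_port_pointer));

	/* publish the port through the private area */
	((struct demo_port_pointer *)shost_priv(shost))->p = port;

	/* lookup, e.g. from a bsg request handler */
	static inline struct demo_port *demo_get_port(struct Scsi_Host *host)
	{
		struct demo_port_pointer *pp = shost_priv(host);

		return pp->p;
	}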
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 5da4605..21be672 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -904,10 +904,14 @@ static void fc_lport_recv_els_req(struct fc_lport *lport,
case ELS_FLOGI:
if (!lport->point_to_multipoint)
fc_lport_recv_flogi_req(lport, fp);
+ else
+ fc_rport_recv_req(lport, fp);
break;
case ELS_LOGO:
if (fc_frame_sid(fp) == FC_FID_FLOGI)
fc_lport_recv_logo_req(lport, fp);
+ else
+ fc_rport_recv_req(lport, fp);
break;
case ELS_RSCN:
lport->tt.disc_recv_req(lport, fp);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index ca15662..3183d63 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -2145,7 +2145,7 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
struct sas_rphy *rphy)
{
struct domain_device *dev;
- unsigned int reslen = 0;
+ unsigned int rcvlen = 0;
int ret = -EINVAL;
/* no rphy means no smp target support (ie aic94xx host) */
@@ -2179,12 +2179,12 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
ret = smp_execute_task_sg(dev, job->request_payload.sg_list,
job->reply_payload.sg_list);
- if (ret > 0) {
- /* positive number is the untransferred residual */
- reslen = ret;
+ if (ret >= 0) {
+ /* bsg_job_done() requires the length received */
+ rcvlen = job->reply_payload.payload_len - ret;
ret = 0;
}
out:
- bsg_job_done(job, ret, reslen);
+ bsg_job_done(job, ret, rcvlen);
}
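The essential point of the sas_smp_handler() fix: bsg_job_done() expects the number of reply bytes actually received, while smp_execute_task_sg() returns the untransferred residual, so the two are related by a one-line subtraction. In sketch form, with illustrative numbers:

	unsigned int rcvlen = 0;
	int ret;

	ret = smp_execute_task_sg(dev, job->request_payload.sg_list,
				  job->reply_payload.sg_list);
	if (ret >= 0) {
		/* e.g. payload_len = 1032, residual ret = 8 -> 1024 received */
		rcvlen = job->reply_payload.payload_len - ret;
		ret = 0;
	}
	bsg_job_done(job, ret, rcvlen);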
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 58476b7..c940685 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -486,15 +486,28 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
int sas_eh_abort_handler(struct scsi_cmnd *cmd)
{
- int res;
+ int res = TMF_RESP_FUNC_FAILED;
struct sas_task *task = TO_SAS_TASK(cmd);
struct Scsi_Host *host = cmd->device->host;
+ struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_internal *i = to_sas_internal(host->transportt);
+ unsigned long flags;
if (!i->dft->lldd_abort_task)
return FAILED;
- res = i->dft->lldd_abort_task(task);
+ spin_lock_irqsave(host->host_lock, flags);
+ /* We cannot do async aborts for SATA devices */
+ if (dev_is_sata(dev) && !host->host_eh_scheduled) {
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return FAILED;
+ }
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ if (task)
+ res = i->dft->lldd_abort_task(task);
+ else
+ SAS_DPRINTK("no task to abort\n");
if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
return SUCCESS;
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 56faeb0..87c08ff 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -753,12 +753,12 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
if (rc < 0) {
- (rqbp->rqb_free_buffer)(phba, rqb_entry);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6409 Cannot post to RQ %d: %x %x\n",
rqb_entry->hrq->queue_id,
rqb_entry->hrq->host_index,
rqb_entry->hrq->hba_index);
+ (rqbp->rqb_free_buffer)(phba, rqb_entry);
} else {
list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
rqbp->buffer_count++;
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index a4f28b7..e188771 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1576,7 +1576,9 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
return req;
for_each_bio(bio) {
- ret = blk_rq_append_bio(req, bio);
+ struct bio *bounce_bio = bio;
+
+ ret = blk_rq_append_bio(req, &bounce_bio);
if (ret)
return ERR_PTR(ret);
}
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index 01f08c0..c3765d2 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -8,9 +8,11 @@ void scsi_show_rq(struct seq_file *m, struct request *rq)
{
struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req);
int msecs = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc);
- char buf[80];
+ const u8 *const cdb = READ_ONCE(cmd->cmnd);
+ char buf[80] = "(?)";
- __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len);
+ if (cdb)
+ __scsi_format_command(buf, sizeof(buf), cdb, cmd->cmd_len);
seq_printf(m, ", .cmd=%s, .retries=%d, allocated %d.%03d s ago", buf,
cmd->retries, msecs / 1000, msecs % 1000);
}
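This debugfs change is the reader half of a publish/unpublish protocol; the writer half is the sd.c hunk later in this patch, which clears SCpnt->cmnd before handing the buffer back to the mempool. The pairing, reduced to its essentials (a sketch, not the exact call sites):

	/* writer: unpublish the pointer, then free the storage */
	cmnd = scmd->cmnd;
	scmd->cmnd = NULL;
	mempool_free(cmnd, pool);

	/* reader: sample the pointer once, tolerate NULL */
	const u8 *cdb = READ_ONCE(scmd->cmnd);
	if (cdb)
		__scsi_format_command(buf, sizeof(buf), cdb, scmd->cmd_len);

The ordering narrows the race to a possible stale read rather than a NULL dereference, which is an acceptable trade-off for a debugfs dump.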
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 78d4aa8d..dfb8da8 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -34,7 +34,6 @@ struct scsi_dev_info_list_table {
};
-static const char spaces[] = "                "; /* 16 of them */
static blist_flags_t scsi_default_dev_flags;
static LIST_HEAD(scsi_dev_info_list);
static char scsi_dev_flags[256];
@@ -298,20 +297,13 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
size_t from_length;
from_length = strlen(from);
- strncpy(to, from, min(to_length, from_length));
- if (from_length < to_length) {
- if (compatible) {
- /*
- * NUL terminate the string if it is short.
- */
- to[from_length] = '\0';
- } else {
- /*
- * space pad the string if it is short.
- */
- strncpy(&to[from_length], spaces,
- to_length - from_length);
- }
+ /* This zero-pads the destination */
+ strncpy(to, from, to_length);
+ if (from_length < to_length && !compatible) {
+ /*
+ * space pad the string if it is short.
+ */
+ memset(&to[from_length], ' ', to_length - from_length);
}
if (from_length > to_length)
printk(KERN_WARNING "%s: %s string '%s' is too long\n",
@@ -382,10 +374,8 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
model, compatible);
if (strflags)
- devinfo->flags = simple_strtoul(strflags, NULL, 0);
- else
- devinfo->flags = flags;
-
+ flags = (__force blist_flags_t)simple_strtoul(strflags, NULL, 0);
+ devinfo->flags = flags;
devinfo->compatible = compatible;
if (compatible)
@@ -458,7 +448,8 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
/*
* vendor strings must be an exact match
*/
- if (vmax != strlen(devinfo->vendor) ||
+ if (vmax != strnlen(devinfo->vendor,
+ sizeof(devinfo->vendor)) ||
memcmp(devinfo->vendor, vskip, vmax))
continue;
@@ -466,7 +457,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
* @model specifies the full string, and
* must be larger or equal to devinfo->model
*/
- mlen = strlen(devinfo->model);
+ mlen = strnlen(devinfo->model, sizeof(devinfo->model));
if (mmax < mlen || memcmp(devinfo->model, mskip, mlen))
continue;
return devinfo;
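Context for the strnlen() conversions: after the strncpy() change above, devinfo->vendor and devinfo->model (fixed-size INQUIRY-style fields) may legitimately lack a terminating NUL when the source exactly fills them, so an unbounded strlen() would read past the array. Illustrative snippet (array size chosen for the example):

	char vendor[8] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };	/* full, no NUL */

	/* strlen(vendor) would scan past the array: undefined behaviour */
	size_t n = strnlen(vendor, sizeof(vendor));	/* safe: n == 8 */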
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 00742c5..d9ca1df 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1967,6 +1967,8 @@ static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
out_put_device:
put_device(&sdev->sdev_gendev);
out:
+ if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
+ blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
return false;
}
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index be5e919..0880d97 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -770,7 +770,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
* SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
**/
static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
- int *bflags, int async)
+ blist_flags_t *bflags, int async)
{
int ret;
@@ -1049,14 +1049,15 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
* - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
**/
static int scsi_probe_and_add_lun(struct scsi_target *starget,
- u64 lun, int *bflagsp,
+ u64 lun, blist_flags_t *bflagsp,
struct scsi_device **sdevp,
enum scsi_scan_mode rescan,
void *hostdata)
{
struct scsi_device *sdev;
unsigned char *result;
- int bflags, res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
+ blist_flags_t bflags;
+ int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
/*
@@ -1201,7 +1202,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
* Modifies sdevscan->lun.
**/
static void scsi_sequential_lun_scan(struct scsi_target *starget,
- int bflags, int scsi_level,
+ blist_flags_t bflags, int scsi_level,
enum scsi_scan_mode rescan)
{
uint max_dev_lun;
@@ -1292,7 +1293,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
* 0: scan completed (or no memory, so further scanning is futile)
* 1: could not scan with REPORT LUN
**/
-static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
+static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
enum scsi_scan_mode rescan)
{
unsigned char scsi_cmd[MAX_COMMAND_SIZE];
@@ -1538,7 +1539,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
unsigned int id, u64 lun, enum scsi_scan_mode rescan)
{
struct Scsi_Host *shost = dev_to_shost(parent);
- int bflags = 0;
+ blist_flags_t bflags = 0;
int res;
struct scsi_target *starget;
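These int-to-blist_flags_t conversions rely on blist_flags_t being declared as a sparse __bitwise type, which is what makes mixing it with plain integers a warning and forces the explicit __force casts at the boundaries. The mechanism in miniature (type and flag names hypothetical):

	typedef unsigned int __bitwise demo_flags_t;

	#define DEMO_FLAG_A	((__force demo_flags_t)BIT(0))
	#define DEMO_FLAG_B	((__force demo_flags_t)BIT(1))

	demo_flags_t f = DEMO_FLAG_A | DEMO_FLAG_B;	/* same bitwise type: ok */
	unsigned int raw = (__force unsigned int)f;	/* crossing needs __force */

Plain gcc compiles __bitwise away; only sparse enforces the typing, so the casts cost nothing at run time.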
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 50e7d7e..26ce1717 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -967,7 +967,8 @@ sdev_show_wwid(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
-#define BLIST_FLAG_NAME(name) [ilog2(BLIST_##name)] = #name
+#define BLIST_FLAG_NAME(name) \
+ [ilog2((__force unsigned int)BLIST_##name)] = #name
static const char *const sdev_bflags_name[] = {
#include "scsi_devinfo_tbl.c"
};
@@ -984,7 +985,7 @@ sdev_show_blacklist(struct device *dev, struct device_attribute *attr,
for (i = 0; i < sizeof(sdev->sdev_bflags) * BITS_PER_BYTE; i++) {
const char *name = NULL;
- if (!(sdev->sdev_bflags & BIT(i)))
+ if (!(sdev->sdev_bflags & (__force blist_flags_t)BIT(i)))
continue;
if (i < ARRAY_SIZE(sdev_bflags_name) && sdev_bflags_name[i])
name = sdev_bflags_name[i];
@@ -1414,7 +1415,10 @@ static void __scsi_remove_target(struct scsi_target *starget)
* check.
*/
if (sdev->channel != starget->channel ||
- sdev->id != starget->id ||
+ sdev->id != starget->id)
+ continue;
+ if (sdev->sdev_state == SDEV_DEL ||
+ sdev->sdev_state == SDEV_CANCEL ||
!get_device(&sdev->sdev_gendev))
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index d0219e3..871ea58 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -26,6 +26,7 @@
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
+#include <linux/suspend.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_device.h>
@@ -50,14 +51,14 @@
/* Our blacklist flags */
enum {
- SPI_BLIST_NOIUS = 0x1,
+ SPI_BLIST_NOIUS = (__force blist_flags_t)0x1,
};
/* blacklist table, modelled on scsi_devinfo.c */
static struct {
char *vendor;
char *model;
- unsigned flags;
+ blist_flags_t flags;
} spi_static_device_list[] __initdata = {
{"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS },
{"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS },
@@ -221,9 +222,11 @@ static int spi_device_configure(struct transport_container *tc,
{
struct scsi_device *sdev = to_scsi_device(dev);
struct scsi_target *starget = sdev->sdev_target;
- unsigned bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
- &sdev->inquiry[16],
- SCSI_DEVINFO_SPI);
+ blist_flags_t bflags;
+
+ bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
+ &sdev->inquiry[16],
+ SCSI_DEVINFO_SPI);
/* Populate the target capability fields with the values
* gleaned from the device inquiry */
@@ -1007,11 +1010,20 @@ spi_dv_device(struct scsi_device *sdev)
u8 *buffer;
const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
+ /*
+ * Because this function and the power management code both call
+ * scsi_device_quiesce(), it is not safe to perform domain validation
+ * while suspend or resume is in progress. Hence the
+ * lock/unlock_system_sleep() calls.
+ */
+ lock_system_sleep();
+
if (unlikely(spi_dv_in_progress(starget)))
- return;
+ goto unlock;
if (unlikely(scsi_device_get(sdev)))
- return;
+ goto unlock;
+
spi_dv_in_progress(starget) = 1;
buffer = kzalloc(len, GFP_KERNEL);
@@ -1047,6 +1059,8 @@ spi_dv_device(struct scsi_device *sdev)
out_put:
spi_dv_in_progress(starget) = 0;
scsi_device_put(sdev);
+unlock:
+ unlock_system_sleep();
}
EXPORT_SYMBOL(spi_dv_device);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 24fe685..a028ab3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1312,6 +1312,7 @@ static int sd_init_command(struct scsi_cmnd *cmd)
static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
struct request *rq = SCpnt->request;
+ u8 *cmnd;
if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK)
sd_zbc_write_unlock_zone(SCpnt);
@@ -1320,9 +1321,10 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
__free_page(rq->special_vec.bv_page);
if (SCpnt->cmnd != scsi_req(rq)->cmd) {
- mempool_free(SCpnt->cmnd, sd_cdb_pool);
+ cmnd = SCpnt->cmnd;
SCpnt->cmnd = NULL;
SCpnt->cmd_len = 0;
+ mempool_free(cmnd, sd_cdb_pool);
}
}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 1b06cf0..3b3d1d0 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -953,10 +953,11 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
case TEST_UNIT_READY:
break;
default:
- set_host_byte(scmnd, DID_TARGET_FAILURE);
+ set_host_byte(scmnd, DID_ERROR);
}
break;
case SRB_STATUS_INVALID_LUN:
+ set_host_byte(scmnd, DID_NO_CONNECT);
do_work = true;
process_err_fn = storvsc_remove_lun;
break;
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 77fe55c..7dcb14d 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -27,6 +27,8 @@
#define DRIVER_NAME "armada_3700_spi"
+#define A3700_SPI_MAX_SPEED_HZ 100000000
+#define A3700_SPI_MAX_PRESCALE 30
#define A3700_SPI_TIMEOUT 10
/* SPI Register Offest */
@@ -79,6 +81,7 @@
#define A3700_SPI_BYTE_LEN BIT(5)
#define A3700_SPI_CLK_PRESCALE BIT(0)
#define A3700_SPI_CLK_PRESCALE_MASK (0x1f)
+#define A3700_SPI_CLK_EVEN_OFFS (0x10)
#define A3700_SPI_WFIFO_THRS_BIT 28
#define A3700_SPI_RFIFO_THRS_BIT 24
@@ -183,12 +186,15 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
return 0;
}
-static void a3700_spi_fifo_mode_set(struct a3700_spi *a3700_spi)
+static void a3700_spi_fifo_mode_set(struct a3700_spi *a3700_spi, bool enable)
{
u32 val;
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
- val |= A3700_SPI_FIFO_MODE;
+ if (enable)
+ val |= A3700_SPI_FIFO_MODE;
+ else
+ val &= ~A3700_SPI_FIFO_MODE;
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
}
@@ -220,6 +226,13 @@ static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz);
+ /* For prescaler values over 15, the prescaler can only be set in
+ * steps of 2. Starting from A3700_SPI_CLK_EVEN_OFFS, the register
+ * encodes even values from 0 up to 30; we only use the range from
+ * 16 to 30.
+ */
+ if (prescale > 15)
+ prescale = A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(prescale, 2);
+
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
val = val & ~A3700_SPI_CLK_PRESCALE_MASK;
@@ -289,7 +302,7 @@ static int a3700_spi_init(struct a3700_spi *a3700_spi)
a3700_spi_deactivate_cs(a3700_spi, i);
/* Enable FIFO mode */
- a3700_spi_fifo_mode_set(a3700_spi);
+ a3700_spi_fifo_mode_set(a3700_spi, true);
/* Set SPI mode */
a3700_spi_mode_set(a3700_spi, master->mode_bits);
@@ -408,15 +421,20 @@ static void a3700_spi_transfer_setup(struct spi_device *spi,
struct spi_transfer *xfer)
{
struct a3700_spi *a3700_spi;
- unsigned int byte_len;
a3700_spi = spi_master_get_devdata(spi->master);
a3700_spi_clock_set(a3700_spi, xfer->speed_hz);
- byte_len = xfer->bits_per_word >> 3;
+ /* Use 4-byte long transfers. Each transfer method has its own way of
+ * dealing with the remaining bytes of transfers that are not 4-byte
+ * aligned.
+ */
+ a3700_spi_bytelen_set(a3700_spi, 4);
- a3700_spi_fifo_thres_set(a3700_spi, byte_len);
+ /* Initialize the working buffers */
+ a3700_spi->tx_buf = xfer->tx_buf;
+ a3700_spi->rx_buf = xfer->rx_buf;
+ a3700_spi->buf_len = xfer->len;
}
static void a3700_spi_set_cs(struct spi_device *spi, bool enable)
@@ -483,7 +501,7 @@ static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi)
u32 val;
while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) {
- val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf);
+ val = *(u32 *)a3700_spi->tx_buf;
spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
a3700_spi->buf_len -= 4;
a3700_spi->tx_buf += 4;
@@ -506,9 +524,8 @@ static int a3700_spi_fifo_read(struct a3700_spi *a3700_spi)
while (!a3700_is_rfifo_empty(a3700_spi) && a3700_spi->buf_len) {
val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
if (a3700_spi->buf_len >= 4) {
- u32 data = le32_to_cpu(val);
- memcpy(a3700_spi->rx_buf, &data, 4);
+ memcpy(a3700_spi->rx_buf, &val, 4);
a3700_spi->buf_len -= 4;
a3700_spi->rx_buf += 4;
@@ -571,27 +588,26 @@ static int a3700_spi_prepare_message(struct spi_master *master,
if (ret)
return ret;
- a3700_spi_bytelen_set(a3700_spi, 4);
-
a3700_spi_mode_set(a3700_spi, spi->mode);
return 0;
}
-static int a3700_spi_transfer_one(struct spi_master *master,
+static int a3700_spi_transfer_one_fifo(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
int ret = 0, timeout = A3700_SPI_TIMEOUT;
- unsigned int nbits = 0;
+ unsigned int nbits = 0, byte_len;
u32 val;
- a3700_spi_transfer_setup(spi, xfer);
+ /* Make sure we use FIFO mode */
+ a3700_spi_fifo_mode_set(a3700_spi, true);
- a3700_spi->tx_buf = xfer->tx_buf;
- a3700_spi->rx_buf = xfer->rx_buf;
- a3700_spi->buf_len = xfer->len;
+ /* Configure FIFO thresholds */
+ byte_len = xfer->bits_per_word >> 3;
+ a3700_spi_fifo_thres_set(a3700_spi, byte_len);
if (xfer->tx_buf)
nbits = xfer->tx_nbits;
@@ -607,6 +623,11 @@ static int a3700_spi_transfer_one(struct spi_master *master,
a3700_spi_header_set(a3700_spi);
if (xfer->rx_buf) {
+ /* Clear WFIFO, since its last 2 bytes are shifted out during
+ * a read operation.
+ */
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, 0);
+
/* Set read data length */
spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG,
a3700_spi->buf_len);
@@ -721,6 +742,63 @@ out:
return ret;
}
+static int a3700_spi_transfer_one_full_duplex(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ u32 val;
+
+ /* Disable FIFO mode */
+ a3700_spi_fifo_mode_set(a3700_spi, false);
+
+ while (a3700_spi->buf_len) {
+
+ /* When fewer than 4 bytes are left to transfer, switch to 1-byte
+ * mode. This is reset after each transfer.
+ */
+ if (a3700_spi->buf_len < 4)
+ a3700_spi_bytelen_set(a3700_spi, 1);
+
+ if (a3700_spi->byte_len == 1)
+ val = *a3700_spi->tx_buf;
+ else
+ val = *(u32 *)a3700_spi->tx_buf;
+
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
+
+ /* Wait for all the data to be shifted in / out */
+ while (!(spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG) &
+ A3700_SPI_XFER_DONE))
+ cpu_relax();
+
+ val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
+
+ memcpy(a3700_spi->rx_buf, &val, a3700_spi->byte_len);
+
+ a3700_spi->buf_len -= a3700_spi->byte_len;
+ a3700_spi->tx_buf += a3700_spi->byte_len;
+ a3700_spi->rx_buf += a3700_spi->byte_len;
+
+ }
+
+ spi_finalize_current_transfer(master);
+
+ return 0;
+}
+
+static int a3700_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ a3700_spi_transfer_setup(spi, xfer);
+
+ if (xfer->tx_buf && xfer->rx_buf)
+ return a3700_spi_transfer_one_full_duplex(master, spi, xfer);
+
+ return a3700_spi_transfer_one_fifo(master, spi, xfer);
+}
+
static int a3700_spi_unprepare_message(struct spi_master *master,
struct spi_message *message)
{
@@ -770,7 +848,6 @@ static int a3700_spi_probe(struct platform_device *pdev)
master->transfer_one = a3700_spi_transfer_one;
master->unprepare_message = a3700_spi_unprepare_message;
master->set_cs = a3700_spi_set_cs;
- master->flags = SPI_MASTER_HALF_DUPLEX;
master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
SPI_RX_QUAD | SPI_TX_QUAD);
@@ -810,6 +887,11 @@ static int a3700_spi_probe(struct platform_device *pdev)
goto error;
}
+ master->max_speed_hz = min_t(unsigned long, A3700_SPI_MAX_SPEED_HZ,
+ clk_get_rate(spi->clk));
+ master->min_speed_hz = DIV_ROUND_UP(clk_get_rate(spi->clk),
+ A3700_SPI_MAX_PRESCALE);
+
ret = a3700_spi_init(spi);
if (ret)
goto error_clk;
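A worked example of the even-prescaler encoding added above, assuming a 200 MHz source clock and a requested 10 MHz (numbers illustrative; the effective-divider reading of encoded values is inferred from the comment in the hunk):

	prescale = DIV_ROUND_UP(200000000, 10000000);	/* = 20 */

	/* 20 > 15, so switch to the even-step encoding */
	prescale = A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(prescale, 2);
	/* = 0x10 + 10 = 26, which selects an effective divider of 20 */

This also explains the new A3700_SPI_MAX_PRESCALE of 30 used for min_speed_hz: it is the largest divider reachable through the even-encoded range.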
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index f95da36..4a11fc0 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -291,6 +291,10 @@ struct atmel_spi {
struct spi_transfer *current_transfer;
int current_remaining_bytes;
int done_status;
+ dma_addr_t dma_addr_rx_bbuf;
+ dma_addr_t dma_addr_tx_bbuf;
+ void *addr_rx_bbuf;
+ void *addr_tx_bbuf;
struct completion xfer_completion;
@@ -436,6 +440,11 @@ static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
spin_unlock_irqrestore(&as->lock, as->flags);
}
+static inline bool atmel_spi_is_vmalloc_xfer(struct spi_transfer *xfer)
+{
+ return is_vmalloc_addr(xfer->tx_buf) || is_vmalloc_addr(xfer->rx_buf);
+}
+
static inline bool atmel_spi_use_dma(struct atmel_spi *as,
struct spi_transfer *xfer)
{
@@ -448,7 +457,12 @@ static bool atmel_spi_can_dma(struct spi_master *master,
{
struct atmel_spi *as = spi_master_get_devdata(master);
- return atmel_spi_use_dma(as, xfer);
+ if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5))
+ return atmel_spi_use_dma(as, xfer) &&
+ !atmel_spi_is_vmalloc_xfer(xfer);
+ else
+ return atmel_spi_use_dma(as, xfer);
+
}
static int atmel_spi_dma_slave_config(struct atmel_spi *as,
@@ -594,6 +608,11 @@ static void dma_callback(void *data)
struct spi_master *master = data;
struct atmel_spi *as = spi_master_get_devdata(master);
+ if (is_vmalloc_addr(as->current_transfer->rx_buf) &&
+ IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ memcpy(as->current_transfer->rx_buf, as->addr_rx_bbuf,
+ as->current_transfer->len);
+ }
complete(&as->xfer_completion);
}
@@ -744,17 +763,41 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
goto err_exit;
/* Send both scatterlists */
- rxdesc = dmaengine_prep_slave_sg(rxchan,
- xfer->rx_sg.sgl, xfer->rx_sg.nents,
- DMA_FROM_DEVICE,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (atmel_spi_is_vmalloc_xfer(xfer) &&
+ IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ rxdesc = dmaengine_prep_slave_single(rxchan,
+ as->dma_addr_rx_bbuf,
+ xfer->len,
+ DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ } else {
+ rxdesc = dmaengine_prep_slave_sg(rxchan,
+ xfer->rx_sg.sgl,
+ xfer->rx_sg.nents,
+ DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ }
if (!rxdesc)
goto err_dma;
- txdesc = dmaengine_prep_slave_sg(txchan,
- xfer->tx_sg.sgl, xfer->tx_sg.nents,
- DMA_TO_DEVICE,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (atmel_spi_is_vmalloc_xfer(xfer) &&
+ IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ memcpy(as->addr_tx_bbuf, xfer->tx_buf, xfer->len);
+ txdesc = dmaengine_prep_slave_single(txchan,
+ as->dma_addr_tx_bbuf,
+ xfer->len, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ } else {
+ txdesc = dmaengine_prep_slave_sg(txchan,
+ xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ }
if (!txdesc)
goto err_dma;
@@ -1426,27 +1469,7 @@ static void atmel_get_caps(struct atmel_spi *as)
as->caps.is_spi2 = version > 0x121;
as->caps.has_wdrbt = version >= 0x210;
-#ifdef CONFIG_SOC_SAM_V4_V5
- /*
- * Atmel SoCs based on ARM9 (SAM9x) cores should not use spi_map_buf()
- * since this later function tries to map buffers with dma_map_sg()
- * even if they have not been allocated inside DMA-safe areas.
- * On SoCs based on Cortex A5 (SAMA5Dx), it works anyway because for
- * those ARM cores, the data cache follows the PIPT model.
- * Also the L2 cache controller of SAMA5D2 uses the PIPT model too.
- * In case of PIPT caches, there cannot be cache aliases.
- * However on ARM9 cores, the data cache follows the VIVT model, hence
- * the cache aliases issue can occur when buffers are allocated from
- * DMA-unsafe areas, by vmalloc() for instance, where cache coherency is
- * not taken into account or at least not handled completely (cache
- * lines of aliases are not invalidated).
- * This is not a theorical issue: it was reproduced when trying to mount
- * a UBI file-system on a at91sam9g35ek board.
- */
- as->caps.has_dma_support = false;
-#else
as->caps.has_dma_support = version >= 0x212;
-#endif
as->caps.has_pdc_support = version < 0x212;
}
@@ -1592,6 +1615,30 @@ static int atmel_spi_probe(struct platform_device *pdev)
as->use_pdc = true;
}
+ if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ as->addr_rx_bbuf = dma_alloc_coherent(&pdev->dev,
+ SPI_MAX_DMA_XFER,
+ &as->dma_addr_rx_bbuf,
+ GFP_KERNEL | GFP_DMA);
+ if (!as->addr_rx_bbuf) {
+ as->use_dma = false;
+ } else {
+ as->addr_tx_bbuf = dma_alloc_coherent(&pdev->dev,
+ SPI_MAX_DMA_XFER,
+ &as->dma_addr_tx_bbuf,
+ GFP_KERNEL | GFP_DMA);
+ if (!as->addr_tx_bbuf) {
+ as->use_dma = false;
+ dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+ as->addr_rx_bbuf,
+ as->dma_addr_rx_bbuf);
+ }
+ }
+ if (!as->use_dma)
+ dev_info(master->dev.parent,
+ " can not allocate dma coherent memory\n");
+ }
+
if (as->caps.has_dma_support && !as->use_dma)
dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");
@@ -1661,12 +1708,20 @@ static int atmel_spi_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
/* reset the hardware and block queue progress */
- spin_lock_irq(&as->lock);
if (as->use_dma) {
atmel_spi_stop_dma(master);
atmel_spi_release_dma(master);
+ if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+ as->addr_tx_bbuf,
+ as->dma_addr_tx_bbuf);
+ dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+ as->addr_rx_bbuf,
+ as->dma_addr_rx_bbuf);
+ }
}
+ spin_lock_irq(&as->lock);
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
spi_readl(as, SR);
diff --git a/drivers/spi/spi-bcm53xx.c b/drivers/spi/spi-bcm53xx.c
index 6e409ea..d02ceb7 100644
--- a/drivers/spi/spi-bcm53xx.c
+++ b/drivers/spi/spi-bcm53xx.c
@@ -27,8 +27,6 @@ struct bcm53xxspi {
struct bcma_device *core;
struct spi_master *master;
void __iomem *mmio_base;
-
- size_t read_offset;
bool bspi; /* Boot SPI mode with memory mapping */
};
@@ -172,8 +170,6 @@ static void bcm53xxspi_buf_write(struct bcm53xxspi *b53spi, u8 *w_buf,
if (!cont)
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0);
-
- b53spi->read_offset = len;
}
static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
@@ -182,10 +178,10 @@ static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
u32 tmp;
int i;
- for (i = 0; i < b53spi->read_offset + len; i++) {
+ for (i = 0; i < len; i++) {
tmp = B53SPI_CDRAM_CONT | B53SPI_CDRAM_PCS_DISABLE_ALL |
B53SPI_CDRAM_PCS_DSCK;
- if (!cont && i == b53spi->read_offset + len - 1)
+ if (!cont && i == len - 1)
tmp &= ~B53SPI_CDRAM_CONT;
tmp &= ~0x1;
/* Command Register File */
@@ -194,8 +190,7 @@ static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
/* Set queue pointers */
bcm53xxspi_write(b53spi, B53SPI_MSPI_NEWQP, 0);
- bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP,
- b53spi->read_offset + len - 1);
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP, len - 1);
if (cont)
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 1);
@@ -214,13 +209,11 @@ static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0);
for (i = 0; i < len; ++i) {
- int offset = b53spi->read_offset + i;
+ u16 reg = B53SPI_MSPI_RXRAM + 4 * (1 + i * 2);
/* Data stored in the transmit register file LSB */
- r_buf[i] = (u8)bcm53xxspi_read(b53spi, B53SPI_MSPI_RXRAM + 4 * (1 + offset * 2));
+ r_buf[i] = (u8)bcm53xxspi_read(b53spi, reg);
}
-
- b53spi->read_offset = 0;
}
static int bcm53xxspi_transfer_one(struct spi_master *master,
@@ -238,7 +231,8 @@ static int bcm53xxspi_transfer_one(struct spi_master *master,
left = t->len;
while (left) {
size_t to_write = min_t(size_t, 16, left);
- bool cont = left - to_write > 0;
+ bool cont = !spi_transfer_is_last(master, t) ||
+ left - to_write > 0;
bcm53xxspi_buf_write(b53spi, buf, to_write, cont);
left -= to_write;
@@ -250,9 +244,9 @@ static int bcm53xxspi_transfer_one(struct spi_master *master,
buf = (u8 *)t->rx_buf;
left = t->len;
while (left) {
- size_t to_read = min_t(size_t, 16 - b53spi->read_offset,
- left);
- bool cont = left - to_read > 0;
+ size_t to_read = min_t(size_t, 16, left);
+ bool cont = !spi_transfer_is_last(master, t) ||
+ left - to_read > 0;
bcm53xxspi_buf_read(b53spi, buf, to_read, cont);
left -= to_read;
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 6ddb6ef..60d59b0 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -945,6 +945,8 @@ static int davinci_spi_probe(struct platform_device *pdev)
goto free_master;
}
+ init_completion(&dspi->done);
+
ret = platform_get_irq(pdev, 0);
if (ret == 0)
ret = -EINVAL;
@@ -1021,8 +1023,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
dspi->get_rx = davinci_spi_rx_buf_u8;
dspi->get_tx = davinci_spi_tx_buf_u8;
- init_completion(&dspi->done);
-
/* Reset In/OUT SPI module */
iowrite32(0, dspi->base + SPIGCR0);
udelay(100);
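The davinci reorder is the standard probe-ordering rule: any state an interrupt handler may touch must be initialized before the interrupt is requested, since the line may fire immediately. Generic sketch (names hypothetical):

	init_completion(&priv->done);	/* the ISR calls complete(&priv->done) */

	ret = devm_request_irq(dev, irq, demo_isr, 0, dev_name(dev), priv);
	if (ret)
		return ret;		/* from here on the ISR can run */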
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index b217c22..211cc7d 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -30,13 +30,11 @@
/* Slave spi_dev related */
struct chip_data {
- u8 cs; /* chip select pin */
u8 tmode; /* TR/TO/RO/EEPROM */
u8 type; /* SPI/SSP/MicroWire */
u8 poll_mode; /* 1 means use poll mode */
- u8 enable_dma;
u16 clk_div; /* baud rate divider */
u32 speed_hz; /* baud rate */
void (*cs_control)(u32 command);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index f652f70..0630962 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -903,10 +903,9 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
}
static const struct of_device_id fsl_dspi_dt_ids[] = {
- { .compatible = "fsl,vf610-dspi", .data = (void *)&vf610_data, },
- { .compatible = "fsl,ls1021a-v1.0-dspi",
- .data = (void *)&ls1021a_v1_data, },
- { .compatible = "fsl,ls2085a-dspi", .data = (void *)&ls2085a_data, },
+ { .compatible = "fsl,vf610-dspi", .data = &vf610_data, },
+ { .compatible = "fsl,ls1021a-v1.0-dspi", .data = &ls1021a_v1_data, },
+ { .compatible = "fsl,ls2085a-dspi", .data = &ls2085a_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
@@ -980,7 +979,7 @@ static int dspi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
master->cleanup = dspi_cleanup;
- master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
SPI_BPW_MASK(16);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 79ddefe..6f57592 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1622,6 +1622,11 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->devtype_data->intctrl(spi_imx, 0);
master->dev.of_node = pdev->dev.of_node;
+ ret = spi_bitbang_start(&spi_imx->bitbang);
+ if (ret) {
+ dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
+ goto out_clk_put;
+ }
/* Request GPIO CS lines, if any */
if (!spi_imx->slave_mode && master->cs_gpios) {
@@ -1640,12 +1645,6 @@ static int spi_imx_probe(struct platform_device *pdev)
}
}
- ret = spi_bitbang_start(&spi_imx->bitbang);
- if (ret) {
- dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
- goto out_clk_put;
- }
-
dev_info(&pdev->dev, "probed\n");
clk_disable(spi_imx->clk_ipg);
@@ -1668,12 +1667,23 @@ static int spi_imx_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ int ret;
spi_bitbang_stop(&spi_imx->bitbang);
+ ret = clk_enable(spi_imx->clk_per);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(spi_imx->clk_ipg);
+ if (ret) {
+ clk_disable(spi_imx->clk_per);
+ return ret;
+ }
+
writel(0, spi_imx->base + MXC_CSPICTRL);
- clk_unprepare(spi_imx->clk_ipg);
- clk_unprepare(spi_imx->clk_per);
+ clk_disable_unprepare(spi_imx->clk_ipg);
+ clk_disable_unprepare(spi_imx->clk_per);
spi_imx_sdma_exit(spi_imx);
spi_master_put(master);
diff --git a/drivers/spi/spi-jcore.c b/drivers/spi/spi-jcore.c
index cebfea5..dafed62 100644
--- a/drivers/spi/spi-jcore.c
+++ b/drivers/spi/spi-jcore.c
@@ -198,8 +198,10 @@ static int jcore_spi_probe(struct platform_device *pdev)
/* Register our spi controller */
err = devm_spi_register_master(&pdev->dev, master);
- if (err)
+ if (err) {
+ clk_disable(clk);
goto exit;
+ }
return 0;
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index 7f84296..5c82910 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -599,6 +599,7 @@ static int meson_spicc_remove(struct platform_device *pdev)
static const struct of_device_id meson_spicc_of_match[] = {
{ .compatible = "amlogic,meson-gx-spicc", },
+ { .compatible = "amlogic,meson-axg-spicc", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_spicc_of_match);
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 8974bb3..deca63e 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -94,6 +94,7 @@ struct orion_spi {
struct spi_master *master;
void __iomem *base;
struct clk *clk;
+ struct clk *axi_clk;
const struct orion_spi_dev *devdata;
struct orion_direct_acc direct_access[ORION_NUM_CHIPSELECTS];
@@ -634,6 +635,16 @@ static int orion_spi_probe(struct platform_device *pdev)
if (status)
goto out;
+ /* The following clock is only used by some SoCs */
+ spi->axi_clk = devm_clk_get(&pdev->dev, "axi");
+ if (IS_ERR(spi->axi_clk) &&
+ PTR_ERR(spi->axi_clk) == -EPROBE_DEFER) {
+ status = -EPROBE_DEFER;
+ goto out_rel_clk;
+ }
+ if (!IS_ERR(spi->axi_clk))
+ clk_prepare_enable(spi->axi_clk);
+
tclk_hz = clk_get_rate(spi->clk);
/*
@@ -658,7 +669,7 @@ static int orion_spi_probe(struct platform_device *pdev)
spi->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(spi->base)) {
status = PTR_ERR(spi->base);
- goto out_rel_clk;
+ goto out_rel_axi_clk;
}
/* Scan all SPI devices of this controller for direct mapped devices */
@@ -696,7 +707,7 @@ static int orion_spi_probe(struct platform_device *pdev)
PAGE_SIZE);
if (!spi->direct_access[cs].vaddr) {
status = -ENOMEM;
- goto out_rel_clk;
+ goto out_rel_axi_clk;
}
spi->direct_access[cs].size = PAGE_SIZE;
@@ -724,6 +735,8 @@ static int orion_spi_probe(struct platform_device *pdev)
out_rel_pm:
pm_runtime_disable(&pdev->dev);
+out_rel_axi_clk:
+ clk_disable_unprepare(spi->axi_clk);
out_rel_clk:
clk_disable_unprepare(spi->clk);
out:
@@ -738,6 +751,7 @@ static int orion_spi_remove(struct platform_device *pdev)
struct orion_spi *spi = spi_master_get_devdata(master);
pm_runtime_get_sync(&pdev->dev);
+ clk_disable_unprepare(spi->axi_clk);
clk_disable_unprepare(spi->clk);
spi_unregister_master(master);
@@ -754,6 +768,7 @@ static int orion_spi_runtime_suspend(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct orion_spi *spi = spi_master_get_devdata(master);
+ clk_disable_unprepare(spi->axi_clk);
clk_disable_unprepare(spi->clk);
return 0;
}
@@ -763,6 +778,8 @@ static int orion_spi_runtime_resume(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct orion_spi *spi = spi_master_get_devdata(master);
+ if (!IS_ERR(spi->axi_clk))
+ clk_prepare_enable(spi->axi_clk);
return clk_prepare_enable(spi->clk);
}
#endif
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 4cb515a..b0822d1 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1237,7 +1237,7 @@ static int setup_cs(struct spi_device *spi, struct chip_data *chip,
* different chip_info, release previously requested GPIO
*/
if (chip->gpiod_cs) {
- gpio_free(desc_to_gpio(chip->gpiod_cs));
+ gpiod_put(chip->gpiod_cs);
chip->gpiod_cs = NULL;
}
@@ -1417,7 +1417,7 @@ static void cleanup(struct spi_device *spi)
if (drv_data->ssp_type != CE4100_SSP && !drv_data->cs_gpiods &&
chip->gpiod_cs)
- gpio_free(desc_to_gpio(chip->gpiod_cs));
+ gpiod_put(chip->gpiod_cs);
kfree(chip);
}
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 2ce8757..0835a8d 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -377,8 +377,8 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
/* Sets SPCMD */
rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
- /* Enables SPI function in master mode */
- rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);
+ /* Sets RSPI mode */
+ rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
return 0;
}
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index de7df20..baa3a9f 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1,17 +1,7 @@
-/*
- * Copyright (C) 2009 Samsung Electronics Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2009 Samsung Electronics Co., Ltd.
+// Jaswinder Singh <jassi.brar@samsung.com>
#include <linux/init.h>
#include <linux/module.h>
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index fcd261f..c5dcfb4 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -19,6 +19,7 @@
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -55,9 +56,14 @@ struct sh_msiof_spi_priv {
void *rx_dma_page;
dma_addr_t tx_dma_addr;
dma_addr_t rx_dma_addr;
+ unsigned short unused_ss;
+ bool native_cs_inited;
+ bool native_cs_high;
bool slave_aborted;
};
+#define MAX_SS 3 /* Maximum number of native chip selects */
+
#define TMDR1 0x00 /* Transmit Mode Register 1 */
#define TMDR2 0x04 /* Transmit Mode Register 2 */
#define TMDR3 0x08 /* Transmit Mode Register 3 */
@@ -91,6 +97,8 @@ struct sh_msiof_spi_priv {
#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
/* TMDR1 */
#define TMDR1_PCON 0x40000000 /* Transfer Signal Connection */
+#define TMDR1_SYNCCH_MASK 0xc000000 /* Synchronization Signal Channel Select */
+#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
/* TMDR2 and RMDR2 */
#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
@@ -324,7 +332,7 @@ static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
return val;
}
-static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
+static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
u32 cpol, u32 cpha,
u32 tx_hi_z, u32 lsb_first, u32 cs_high)
{
@@ -342,10 +350,13 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
tmp |= lsb_first << MDR1_BITLSB_SHIFT;
tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
- if (spi_controller_is_slave(p->master))
+ if (spi_controller_is_slave(p->master)) {
sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
- else
- sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
+ } else {
+ sh_msiof_write(p, TMDR1,
+ tmp | MDR1_TRMD | TMDR1_PCON |
+ (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT);
+ }
if (p->master->flags & SPI_MASTER_MUST_TX) {
/* These bits are reserved if RX needs TX */
tmp &= ~0x0000ffff;
@@ -528,8 +539,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
{
struct device_node *np = spi->master->dev.of_node;
struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
-
- pm_runtime_get_sync(&p->pdev->dev);
+ u32 clr, set, tmp;
if (!np) {
/*
@@ -539,19 +549,31 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
spi->cs_gpio = (uintptr_t)spi->controller_data;
}
- /* Configure pins before deasserting CS */
- sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
- !!(spi->mode & SPI_CPHA),
- !!(spi->mode & SPI_3WIRE),
- !!(spi->mode & SPI_LSB_FIRST),
- !!(spi->mode & SPI_CS_HIGH));
+ if (gpio_is_valid(spi->cs_gpio)) {
+ gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ return 0;
+ }
- if (spi->cs_gpio >= 0)
- gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ if (spi_controller_is_slave(p->master))
+ return 0;
+ if (p->native_cs_inited &&
+ (p->native_cs_high == !!(spi->mode & SPI_CS_HIGH)))
+ return 0;
+ /* Configure native chip select mode/polarity early */
+ clr = MDR1_SYNCMD_MASK;
+ set = MDR1_TRMD | TMDR1_PCON | MDR1_SYNCMD_SPI;
+ if (spi->mode & SPI_CS_HIGH)
+ clr |= BIT(MDR1_SYNCAC_SHIFT);
+ else
+ set |= BIT(MDR1_SYNCAC_SHIFT);
+ pm_runtime_get_sync(&p->pdev->dev);
+ tmp = sh_msiof_read(p, TMDR1) & ~clr;
+ sh_msiof_write(p, TMDR1, tmp | set);
pm_runtime_put(&p->pdev->dev);
-
+ p->native_cs_high = spi->mode & SPI_CS_HIGH;
+ p->native_cs_inited = true;
return 0;
}
@@ -560,13 +582,20 @@ static int sh_msiof_prepare_message(struct spi_master *master,
{
struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
const struct spi_device *spi = msg->spi;
+ u32 ss, cs_high;
/* Configure pins before asserting CS */
- sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
+ if (gpio_is_valid(spi->cs_gpio)) {
+ ss = p->unused_ss;
+ cs_high = p->native_cs_high;
+ } else {
+ ss = spi->chip_select;
+ cs_high = !!(spi->mode & SPI_CS_HIGH);
+ }
+ sh_msiof_spi_set_pin_regs(p, ss, !!(spi->mode & SPI_CPOL),
!!(spi->mode & SPI_CPHA),
!!(spi->mode & SPI_3WIRE),
- !!(spi->mode & SPI_LSB_FIRST),
- !!(spi->mode & SPI_CS_HIGH));
+ !!(spi->mode & SPI_LSB_FIRST), cs_high);
return 0;
}
@@ -784,11 +813,21 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
goto stop_dma;
}
- /* wait for tx fifo to be emptied / rx fifo to be filled */
+ /* wait for tx/rx DMA completion */
ret = sh_msiof_wait_for_completion(p);
if (ret)
goto stop_reset;
+ if (!rx) {
+ reinit_completion(&p->done);
+ sh_msiof_write(p, IER, IER_TEOFE);
+
+ /* wait for tx fifo to be emptied */
+ ret = sh_msiof_wait_for_completion(p);
+ if (ret)
+ goto stop_reset;
+ }
+
/* clear status bits */
sh_msiof_reset_str(p);
@@ -912,9 +951,8 @@ static int sh_msiof_transfer_one(struct spi_master *master,
ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l);
if (ret == -EAGAIN) {
- pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
- dev_driver_string(&p->pdev->dev),
- dev_name(&p->pdev->dev));
+ dev_warn_once(&p->pdev->dev,
+ "DMA not available, falling back to PIO\n");
break;
}
if (ret)
@@ -1071,6 +1109,45 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
}
#endif
+static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
+{
+ struct device *dev = &p->pdev->dev;
+ unsigned int used_ss_mask = 0;
+ unsigned int cs_gpios = 0;
+ unsigned int num_cs, i;
+ int ret;
+
+ ret = gpiod_count(dev, "cs");
+ if (ret <= 0)
+ return 0;
+
+ num_cs = max_t(unsigned int, ret, p->master->num_chipselect);
+ for (i = 0; i < num_cs; i++) {
+ struct gpio_desc *gpiod;
+
+ gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
+ if (!IS_ERR(gpiod)) {
+ cs_gpios++;
+ continue;
+ }
+
+ if (PTR_ERR(gpiod) != -ENOENT)
+ return PTR_ERR(gpiod);
+
+ if (i >= MAX_SS) {
+ dev_err(dev, "Invalid native chip select %d\n", i);
+ return -EINVAL;
+ }
+ used_ss_mask |= BIT(i);
+ }
+ p->unused_ss = ffz(used_ss_mask);
+ if (cs_gpios && p->unused_ss >= MAX_SS) {
+ dev_err(dev, "No unused native chip select available\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
{
@@ -1284,13 +1361,18 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
if (p->info->rx_fifo_override)
p->rx_fifo_size = p->info->rx_fifo_override;
+ /* Setup GPIO chip selects */
+ master->num_chipselect = p->info->num_chipselect;
+ ret = sh_msiof_get_cs_gpios(p);
+ if (ret)
+ goto err1;
+
/* init master code */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
master->flags = chipdata->master_flags;
master->bus_num = pdev->id;
master->dev.of_node = pdev->dev.of_node;
- master->num_chipselect = p->info->num_chipselect;
master->setup = sh_msiof_spi_setup;
master->prepare_message = sh_msiof_prepare_message;
master->slave_abort = sh_msiof_slave_abort;
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index bbb1a27..f009d76 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -1072,7 +1072,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
struct sirfsoc_spi *sspi;
struct spi_master *master;
struct resource *mem_res;
- struct sirf_spi_comp_data *spi_comp_data;
+ const struct sirf_spi_comp_data *spi_comp_data;
int irq;
int ret;
const struct of_device_id *match;
@@ -1092,7 +1092,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
sspi = spi_master_get_devdata(master);
sspi->fifo_full_offset = ilog2(sspi->fifo_size);
- spi_comp_data = (struct sirf_spi_comp_data *)match->data;
+ spi_comp_data = match->data;
sspi->regs = spi_comp_data->regs;
sspi->type = spi_comp_data->type;
sspi->fifo_level_chk_mask = (sspi->fifo_size / 4) - 1;
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index c5cd635..4141003 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -525,7 +525,7 @@ err_free_master:
static int sun4i_spi_remove(struct platform_device *pdev)
{
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
return 0;
}
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index fb38234..8533f4e 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -541,7 +541,7 @@ err_free_master:
static int sun6i_spi_remove(struct platform_device *pdev)
{
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
return 0;
}
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index bc7100b..63fedc4 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -271,6 +271,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
while (remaining_words) {
int n_words, tx_words, rx_words;
u32 sr;
+ int stalled;
n_words = min(remaining_words, xspi->buffer_size);
@@ -299,7 +300,17 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
/* Read out all the data from the Rx FIFO */
rx_words = n_words;
+ stalled = 10;
while (rx_words) {
+ if (rx_words == n_words && !(stalled--) &&
+ !(sr & XSPI_SR_TX_EMPTY_MASK) &&
+ (sr & XSPI_SR_RX_EMPTY_MASK)) {
+ dev_err(&spi->dev,
+ "Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n");
+ xspi_init_hw(xspi);
+ return -EIO;
+ }
+
if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
xilinx_spi_rx(xspi);
rx_words--;
@@ -370,6 +381,7 @@ static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi)
}
static const struct of_device_id xilinx_spi_of_match[] = {
+ { .compatible = "xlnx,axi-quad-spi-1.00.a", },
{ .compatible = "xlnx,xps-spi-2.00.a", },
{ .compatible = "xlnx,xps-spi-2.00.b", },
{}
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index d8e4219..71c7376 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -32,7 +32,7 @@ config SSB_BLOCKIO
config SSB_PCIHOST_POSSIBLE
bool
- depends on SSB && (PCI = y || PCI = SSB)
+ depends on SSB && (PCI = y || PCI = SSB) && PCI_DRIVERS_LEGACY
default y
config SSB_PCIHOST
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 0f695df..372ce99 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -765,10 +765,12 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
case ASHMEM_SET_SIZE:
ret = -EINVAL;
+ mutex_lock(&ashmem_mutex);
if (!asma->file) {
ret = 0;
asma->size = (size_t)arg;
}
+ mutex_unlock(&ashmem_mutex);
break;
case ASHMEM_GET_SIZE:
ret = asma->size;
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index a517b2d..8f64941 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -37,7 +37,7 @@ config ION_CHUNK_HEAP
config ION_CMA_HEAP
bool "Ion CMA heap support"
- depends on ION && CMA
+ depends on ION && DMA_CMA
help
Choose this option to enable CMA heaps with Ion. This heap is backed
by the Contiguous Memory Allocator (CMA). If your system has these
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index a7d9b0e..f480885 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -346,7 +346,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
mutex_lock(&buffer->lock);
list_for_each_entry(a, &buffer->attachments, list) {
dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
- DMA_BIDIRECTIONAL);
+ direction);
}
mutex_unlock(&buffer->lock);
@@ -368,7 +368,7 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
mutex_lock(&buffer->lock);
list_for_each_entry(a, &buffer->attachments, list) {
dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
- DMA_BIDIRECTIONAL);
+ direction);
}
mutex_unlock(&buffer->lock);
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index dd5545d..86196ff 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -39,9 +39,15 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
struct ion_cma_heap *cma_heap = to_cma_heap(heap);
struct sg_table *table;
struct page *pages;
+ unsigned long size = PAGE_ALIGN(len);
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ unsigned long align = get_order(size);
int ret;
- pages = cma_alloc(cma_heap->cma, len, 0, GFP_KERNEL);
+ if (align > CONFIG_CMA_ALIGNMENT)
+ align = CONFIG_CMA_ALIGNMENT;
+
+ pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
if (!pages)
return -ENOMEM;
@@ -53,7 +59,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
if (ret)
goto free_mem;
- sg_set_page(table->sgl, pages, len, 0);
+ sg_set_page(table->sgl, pages, size, 0);
buffer->priv_virt = pages;
buffer->sg_table = table;
@@ -62,7 +68,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
free_mem:
kfree(table);
err:
- cma_release(cma_heap->cma, pages, buffer->size);
+ cma_release(cma_heap->cma, pages, nr_pages);
return -ENOMEM;
}
@@ -70,9 +76,10 @@ static void ion_cma_free(struct ion_buffer *buffer)
{
struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
struct page *pages = buffer->priv_virt;
+ unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
/* release memory */
- cma_release(cma_heap->cma, pages, buffer->size);
+ cma_release(cma_heap->cma, pages, nr_pages);
/* release sg table */
sg_free_table(buffer->sg_table);
kfree(buffer->sg_table);
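The ion_cma_heap fix is a units bug: cma_alloc() and cma_release() take a page count and an allocation order, not byte sizes. Worked numbers for a 1 MiB request with 4 KiB pages (illustrative):

	unsigned long size = PAGE_ALIGN(len);		/* 1048576 */
	unsigned long nr_pages = size >> PAGE_SHIFT;	/* 256 pages */
	unsigned long align = get_order(size);		/* order 8, i.e. 2^8 pages */

	if (align > CONFIG_CMA_ALIGNMENT)		/* clamp to the CMA limit */
		align = CONFIG_CMA_ALIGNMENT;

	pages = cma_alloc(cma, nr_pages, align, GFP_KERNEL);
	...
	cma_release(cma, pages, nr_pages);		/* a page count, not bytes */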
diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c
index 1799d3f..2035835 100644
--- a/drivers/staging/ccree/ssi_hash.c
+++ b/drivers/staging/ccree/ssi_hash.c
@@ -1769,7 +1769,7 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct ahash_req_ctx *state = ahash_request_ctx(req);
u32 tmp;
- int rc;
+ int rc = 0;
memcpy(&tmp, in, sizeof(u32));
if (tmp != CC_EXPORT_MAGIC) {
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 986c2a4..8267119 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -487,21 +487,18 @@ ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
ksocknal_nid2peerlist(id.nid));
}
- route2 = NULL;
list_for_each_entry(route2, &peer->ksnp_routes, ksnr_list) {
- if (route2->ksnr_ipaddr == ipaddr)
- break;
-
- route2 = NULL;
- }
- if (!route2) {
- ksocknal_add_route_locked(peer, route);
- route->ksnr_share_count++;
- } else {
- ksocknal_route_decref(route);
- route2->ksnr_share_count++;
+ if (route2->ksnr_ipaddr == ipaddr) {
+ /* Route already exists, use the old one */
+ ksocknal_route_decref(route);
+ route2->ksnr_share_count++;
+ goto out;
+ }
}
-
+ /* Route doesn't already exist, add the new one */
+ ksocknal_add_route_locked(peer, route);
+ route->ksnr_share_count++;
+out:
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
return 0;
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 5b2e47c..6f59045 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -369,8 +369,6 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
}
ctx->pos = pos;
ll_finish_md_op_data(op_data);
- filp->f_version = inode->i_version;
-
out:
if (!rc)
ll_stats_ops_tally(sbi, LPROC_LL_READDIR, 1);
@@ -1678,7 +1676,6 @@ static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
else
fd->lfd_pos = offset;
file->f_pos = offset;
- file->f_version = 0;
}
ret = offset;
}
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 87595c5..264ad36 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -637,8 +637,7 @@ static int spinand_write_page_hwecc(struct mtd_info *mtd,
int eccsteps = chip->ecc.steps;
enable_hw_ecc = 1;
- chip->write_buf(mtd, p, eccsize * eccsteps);
- return 0;
+ return nand_prog_page_op(chip, page, 0, p, eccsize * eccsteps);
}
static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
@@ -653,7 +652,7 @@ static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
enable_read_hw_ecc = 1;
- chip->read_buf(mtd, p, eccsize * eccsteps);
+ nand_read_page_op(chip, page, 0, p, eccsize * eccsteps);
if (oob_required)
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index e69a215..12c9df9 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -102,7 +102,7 @@ enum modulation rf69_get_modulation(struct spi_device *spi)
currentValue = READ_REG(REG_DATAMODUL);
- switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE >> 3) { // TODO improvement: change 3 to define
+ switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE) {
case DATAMODUL_MODULATION_TYPE_OOK: return OOK;
case DATAMODUL_MODULATION_TYPE_FSK: return FSK;
default: return undefined;
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index e2bc999..4c44d7b 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -5,6 +5,7 @@ menuconfig TARGET_CORE
select CONFIGFS_FS
select CRC_T10DIF
select BLK_SCSI_REQUEST # only for scsi_command_size_tbl..
+ select SGL_ALLOC
default n
help
Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 7c69b4a..0d99b24 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -920,7 +920,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
" %d i: %d bio: %p, allocating another"
" bio\n", bio->bi_vcnt, i, bio);
- rc = blk_rq_append_bio(req, bio);
+ rc = blk_rq_append_bio(req, &bio);
if (rc) {
pr_err("pSCSI: failed to append bio\n");
goto fail;
@@ -938,7 +938,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
}
if (bio) {
- rc = blk_rq_append_bio(req, bio);
+ rc = blk_rq_append_bio(req, &bio);
if (rc) {
pr_err("pSCSI: failed to append bio\n");
goto fail;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 58caacd..c03a78e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2300,13 +2300,7 @@ queue_full:
void target_free_sgl(struct scatterlist *sgl, int nents)
{
- struct scatterlist *sg;
- int count;
-
- for_each_sg(sgl, sg, nents, count)
- __free_page(sg_page(sg));
-
- kfree(sgl);
+ sgl_free_n_order(sgl, nents, 0);
}
EXPORT_SYMBOL(target_free_sgl);
@@ -2414,42 +2408,10 @@ int
target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
bool zero_page, bool chainable)
{
- struct scatterlist *sg;
- struct page *page;
- gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
- unsigned int nalloc, nent;
- int i = 0;
-
- nalloc = nent = DIV_ROUND_UP(length, PAGE_SIZE);
- if (chainable)
- nalloc++;
- sg = kmalloc_array(nalloc, sizeof(struct scatterlist), GFP_KERNEL);
- if (!sg)
- return -ENOMEM;
+ gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);
- sg_init_table(sg, nalloc);
-
- while (length) {
- u32 page_len = min_t(u32, length, PAGE_SIZE);
- page = alloc_page(GFP_KERNEL | zero_flag);
- if (!page)
- goto out;
-
- sg_set_page(&sg[i], page, page_len, 0);
- length -= page_len;
- i++;
- }
- *sgl = sg;
- *nents = nent;
- return 0;
-
-out:
- while (i > 0) {
- i--;
- __free_page(sg_page(&sg[i]));
- }
- kfree(sg);
- return -ENOMEM;
+ *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents);
+ return *sgl ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(target_alloc_sgl);
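
The rewrite above leans on the lib/scatterlist helpers pulled in by the SGL_ALLOC select earlier in this series. A minimal sketch of the allocate/free pairing at order 0, mirroring the zero_page semantics (function name hypothetical):

        #include <linux/scatterlist.h>

        static int demo_sgl_roundtrip(u32 length)
        {
                struct scatterlist *sgl;
                unsigned int nents;

                /* order-0 pages, not chainable, zeroed as if zero_page */
                sgl = sgl_alloc_order(length, 0, false,
                                      GFP_KERNEL | __GFP_ZERO, &nents);
                if (!sgl)
                        return -ENOMEM;

                /* ... dma_map_sg() and use the list here ... */

                sgl_free_n_order(sgl, nents, 0);
                return 0;
        }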
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index dc63aba..dfd2324 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -88,7 +88,6 @@ struct time_in_idle {
* @policy: cpufreq policy.
* @node: list_head to link all cpufreq_cooling_device together.
* @idle_time: idle time stats
- * @plat_get_static_power: callback to calculate the static power
*
* This structure is required for keeping information of each registered
* cpufreq_cooling_device.
@@ -104,7 +103,6 @@ struct cpufreq_cooling_device {
struct cpufreq_policy *policy;
struct list_head node;
struct time_in_idle *idle_time;
- get_static_t plat_get_static_power;
};
static DEFINE_IDA(cpufreq_ida);
@@ -319,60 +317,6 @@ static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
}
/**
- * get_static_power() - calculate the static power consumed by the cpus
- * @cpufreq_cdev: struct &cpufreq_cooling_device for this cpu cdev
- * @tz: thermal zone device in which we're operating
- * @freq: frequency in KHz
- * @power: pointer in which to store the calculated static power
- *
- * Calculate the static power consumed by the cpus described by
- * @cpu_actor running at frequency @freq. This function relies on a
- * platform specific function that should have been provided when the
- * actor was registered. If it wasn't, the static power is assumed to
- * be negligible. The calculated static power is stored in @power.
- *
- * Return: 0 on success, -E* on failure.
- */
-static int get_static_power(struct cpufreq_cooling_device *cpufreq_cdev,
- struct thermal_zone_device *tz, unsigned long freq,
- u32 *power)
-{
- struct dev_pm_opp *opp;
- unsigned long voltage;
- struct cpufreq_policy *policy = cpufreq_cdev->policy;
- struct cpumask *cpumask = policy->related_cpus;
- unsigned long freq_hz = freq * 1000;
- struct device *dev;
-
- if (!cpufreq_cdev->plat_get_static_power) {
- *power = 0;
- return 0;
- }
-
- dev = get_cpu_device(policy->cpu);
- WARN_ON(!dev);
-
- opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
- if (IS_ERR(opp)) {
- dev_warn_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n",
- freq_hz, PTR_ERR(opp));
- return -EINVAL;
- }
-
- voltage = dev_pm_opp_get_voltage(opp);
- dev_pm_opp_put(opp);
-
- if (voltage == 0) {
- dev_err_ratelimited(dev, "Failed to get voltage for frequency %lu\n",
- freq_hz);
- return -EINVAL;
- }
-
- return cpufreq_cdev->plat_get_static_power(cpumask, tz->passive_delay,
- voltage, power);
-}
-
-/**
* get_dynamic_power() - calculate the dynamic power
* @cpufreq_cdev: &cpufreq_cooling_device for this cdev
* @freq: current frequency
@@ -491,8 +435,8 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
u32 *power)
{
unsigned long freq;
- int i = 0, cpu, ret;
- u32 static_power, dynamic_power, total_load = 0;
+ int i = 0, cpu;
+ u32 total_load = 0;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
struct cpufreq_policy *policy = cpufreq_cdev->policy;
u32 *load_cpu = NULL;
@@ -522,22 +466,15 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
cpufreq_cdev->last_load = total_load;
- dynamic_power = get_dynamic_power(cpufreq_cdev, freq);
- ret = get_static_power(cpufreq_cdev, tz, freq, &static_power);
- if (ret) {
- kfree(load_cpu);
- return ret;
- }
+ *power = get_dynamic_power(cpufreq_cdev, freq);
if (load_cpu) {
trace_thermal_power_cpu_get_power(policy->related_cpus, freq,
- load_cpu, i, dynamic_power,
- static_power);
+ load_cpu, i, *power);
kfree(load_cpu);
}
- *power = static_power + dynamic_power;
return 0;
}
@@ -561,8 +498,6 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
unsigned long state, u32 *power)
{
unsigned int freq, num_cpus;
- u32 static_power, dynamic_power;
- int ret;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
/* Request state should be less than max_level */
@@ -572,13 +507,9 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);
freq = cpufreq_cdev->freq_table[state].frequency;
- dynamic_power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
- ret = get_static_power(cpufreq_cdev, tz, freq, &static_power);
- if (ret)
- return ret;
+ *power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
- *power = static_power + dynamic_power;
- return ret;
+ return 0;
}
/**
@@ -606,21 +537,14 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
unsigned int cur_freq, target_freq;
- int ret;
- s32 dyn_power;
- u32 last_load, normalised_power, static_power;
+ u32 last_load, normalised_power;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
struct cpufreq_policy *policy = cpufreq_cdev->policy;
cur_freq = cpufreq_quick_get(policy->cpu);
- ret = get_static_power(cpufreq_cdev, tz, cur_freq, &static_power);
- if (ret)
- return ret;
-
- dyn_power = power - static_power;
- dyn_power = dyn_power > 0 ? dyn_power : 0;
+ power = power > 0 ? power : 0;
last_load = cpufreq_cdev->last_load ?: 1;
- normalised_power = (dyn_power * 100) / last_load;
+ normalised_power = (power * 100) / last_load;
target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);
*state = get_level(cpufreq_cdev, target_freq);
@@ -671,8 +595,6 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
* @policy: cpufreq policy
* Normally this should be same as cpufreq policy->related_cpus.
* @capacitance: dynamic power coefficient for these cpus
- * @plat_static_func: function to calculate the static power consumed by these
- * cpus (optional)
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -684,8 +606,7 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
*/
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy, u32 capacitance,
- get_static_t plat_static_func)
+ struct cpufreq_policy *policy, u32 capacitance)
{
struct thermal_cooling_device *cdev;
struct cpufreq_cooling_device *cpufreq_cdev;
@@ -755,8 +676,6 @@ __cpufreq_cooling_register(struct device_node *np,
}
if (capacitance) {
- cpufreq_cdev->plat_get_static_power = plat_static_func;
-
ret = update_freq_table(cpufreq_cdev, capacitance);
if (ret) {
cdev = ERR_PTR(ret);
@@ -813,13 +732,12 @@ free_cdev:
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
- return __cpufreq_cooling_register(NULL, policy, 0, NULL);
+ return __cpufreq_cooling_register(NULL, policy, 0);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
/**
* of_cpufreq_cooling_register - function to create cpufreq cooling device.
- * @np: a valid struct device_node to the cooling device device tree node
* @policy: cpufreq policy
*
* This interface function registers the cpufreq cooling device with the name
@@ -827,86 +745,45 @@ EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
* cooling devices. Using this API, the cpufreq cooling device will be
* linked to the device tree node provided.
*
- * Return: a valid struct thermal_cooling_device pointer on success,
- * on failure, it returns a corresponding ERR_PTR().
- */
-struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy)
-{
- if (!np)
- return ERR_PTR(-EINVAL);
-
- return __cpufreq_cooling_register(np, policy, 0, NULL);
-}
-EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
-
-/**
- * cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
- * @policy: cpufreq policy
- * @capacitance: dynamic power coefficient for these cpus
- * @plat_static_func: function to calculate the static power consumed by these
- * cpus (optional)
- *
- * This interface function registers the cpufreq cooling device with
- * the name "thermal-cpufreq-%x". This api can support multiple
- * instances of cpufreq cooling devices. Using this function, the
- * cooling device will implement the power extensions by using a
- * simple cpu power model. The cpus must have registered their OPPs
- * using the OPP library.
- *
- * An optional @plat_static_func may be provided to calculate the
- * static power consumed by these cpus. If the platform's static
- * power consumption is unknown or negligible, make it NULL.
- *
- * Return: a valid struct thermal_cooling_device pointer on success,
- * on failure, it returns a corresponding ERR_PTR().
- */
-struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy, u32 capacitance,
- get_static_t plat_static_func)
-{
- return __cpufreq_cooling_register(NULL, policy, capacitance,
- plat_static_func);
-}
-EXPORT_SYMBOL(cpufreq_power_cooling_register);
-
-/**
- * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
- * @np: a valid struct device_node to the cooling device device tree node
- * @policy: cpufreq policy
- * @capacitance: dynamic power coefficient for these cpus
- * @plat_static_func: function to calculate the static power consumed by these
- * cpus (optional)
- *
- * This interface function registers the cpufreq cooling device with
- * the name "thermal-cpufreq-%x". This api can support multiple
- * instances of cpufreq cooling devices. Using this API, the cpufreq
- * cooling device will be linked to the device tree node provided.
* Using this function, the cooling device will implement the power
* extensions by using a simple cpu power model. The cpus must have
* registered their OPPs using the OPP library.
*
- * An optional @plat_static_func may be provided to calculate the
- * static power consumed by these cpus. If the platform's static
- * power consumption is unknown or negligible, make it NULL.
+ * It also takes into account, if the property is present in the policy's
+ * CPU node, the static power consumed by the CPU.
*
* Return: a valid struct thermal_cooling_device pointer on success,
- * on failure, it returns a corresponding ERR_PTR().
+ * and NULL on failure.
*/
struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func)
+of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
- if (!np)
- return ERR_PTR(-EINVAL);
+ struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
+ struct thermal_cooling_device *cdev = NULL;
+ u32 capacitance = 0;
+
+ if (!np) {
+ pr_err("cpu_cooling: OF node not available for cpu%d\n",
+ policy->cpu);
+ return NULL;
+ }
+
+ if (of_find_property(np, "#cooling-cells", NULL)) {
+ of_property_read_u32(np, "dynamic-power-coefficient",
+ &capacitance);
- return __cpufreq_cooling_register(np, policy, capacitance,
- plat_static_func);
+ cdev = __cpufreq_cooling_register(np, policy, capacitance);
+ if (IS_ERR(cdev)) {
+ pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n",
+ policy->cpu, PTR_ERR(cdev));
+ cdev = NULL;
+ }
+ }
+
+ of_node_put(np);
+ return cdev;
}
-EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
+EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
/**
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
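
With the power-model plumbing folded into of_cpufreq_cooling_register(), a cpufreq driver now passes only its policy; the device-tree lookup, the #cooling-cells check and the dynamic-power-coefficient read all happen inside, and failure yields NULL rather than an ERR_PTR(). A hypothetical caller:

        static void demo_cpufreq_ready(struct cpufreq_policy *policy)
        {
                struct thermal_cooling_device *cdev;

                cdev = of_cpufreq_cooling_register(policy);
                if (!cdev)
                        pr_debug("cpu%d: no cooling device registered\n",
                                 policy->cpu);
        }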
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 419a7a9..f45bcbc 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -339,7 +339,7 @@ static void __ring_interrupt(struct tb_ring *ring)
return;
if (ring->start_poll) {
- __ring_interrupt_mask(ring, false);
+ __ring_interrupt_mask(ring, true);
ring->start_poll(ring->poll_data);
} else {
schedule_work(&ring->work);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 427e0d5..539b49a 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1762,7 +1762,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
{
struct n_tty_data *ldata = tty->disc_data;
- if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) {
+ if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) {
bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
ldata->line_start = ldata->read_tail;
if (!L_ICANON(tty) || !read_cnt(ldata)) {
@@ -2425,7 +2425,7 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
case TIOCINQ:
down_write(&tty->termios_rwsem);
- if (L_ICANON(tty))
+ if (L_ICANON(tty) && !L_EXTPROC(tty))
retval = inq_canon(ldata);
else
retval = read_cnt(ldata);
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 1bef398..28133db 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -132,6 +132,33 @@ void serdev_device_close(struct serdev_device *serdev)
}
EXPORT_SYMBOL_GPL(serdev_device_close);
+static void devm_serdev_device_release(struct device *dev, void *dr)
+{
+ serdev_device_close(*(struct serdev_device **)dr);
+}
+
+int devm_serdev_device_open(struct device *dev, struct serdev_device *serdev)
+{
+ struct serdev_device **dr;
+ int ret;
+
+ dr = devres_alloc(devm_serdev_device_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ ret = serdev_device_open(serdev);
+ if (ret) {
+ devres_free(dr);
+ return ret;
+ }
+
+ *dr = serdev;
+ devres_add(dev, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_serdev_device_open);
+
void serdev_device_write_wakeup(struct serdev_device *serdev)
{
complete(&serdev->write_comp);
@@ -268,8 +295,8 @@ static int serdev_drv_probe(struct device *dev)
static int serdev_drv_remove(struct device *dev)
{
const struct serdev_device_driver *sdrv = to_serdev_device_driver(dev->driver);
-
- sdrv->remove(to_serdev_device(dev));
+ if (sdrv->remove)
+ sdrv->remove(to_serdev_device(dev));
return 0;
}
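
A sketch of a client driver adopting the new managed open: the matching close is queued on the devres list, so no explicit cleanup is needed in remove() (driver specifics below are illustrative):

        #include <linux/serdev.h>

        static int demo_serdev_probe(struct serdev_device *serdev)
        {
                int ret;

                /* paired serdev_device_close() runs on driver detach */
                ret = devm_serdev_device_open(&serdev->dev, serdev);
                if (ret)
                        return ret;

                serdev_device_set_baudrate(serdev, 115200);
                return 0;
        }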
diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
index 3593ce0..8800099 100644
--- a/drivers/usb/chipidea/ci_hdrc_msm.c
+++ b/drivers/usb/chipidea/ci_hdrc_msm.c
@@ -247,7 +247,7 @@ static int ci_hdrc_msm_probe(struct platform_device *pdev)
if (ret)
goto err_mux;
- ulpi_node = of_find_node_by_name(of_node_get(pdev->dev.of_node), "ulpi");
+ ulpi_node = of_get_child_by_name(pdev->dev.of_node, "ulpi");
if (ulpi_node) {
phy_node = of_get_next_available_child(ulpi_node, NULL);
ci->hsic = of_device_is_compatible(phy_node, "qcom,usb-hsic-phy");
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 55b198b..c821b4b 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -555,6 +555,9 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
unsigned iad_num = 0;
memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
+ nintf = nintf_orig = config->desc.bNumInterfaces;
+ config->desc.bNumInterfaces = 0; // Adjusted later
+
if (config->desc.bDescriptorType != USB_DT_CONFIG ||
config->desc.bLength < USB_DT_CONFIG_SIZE ||
config->desc.bLength > size) {
@@ -568,7 +571,6 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
buffer += config->desc.bLength;
size -= config->desc.bLength;
- nintf = nintf_orig = config->desc.bNumInterfaces;
if (nintf > USB_MAXINTERFACES) {
dev_warn(ddev, "config %d has too many interfaces: %d, "
"using maximum allowed: %d\n",
@@ -1005,7 +1007,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
case USB_SSP_CAP_TYPE:
ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
- USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1;
+ USB_SSP_SUBLINK_SPEED_ATTRIBS);
if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
dev->bos->ssp_cap = ssp_cap;
break;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index a10b346..4024926 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -52,10 +52,11 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
- /* Logitech HD Pro Webcams C920, C920-C and C930e */
+ /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
/* Logitech ConferenceCam CC3000e */
{ USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -149,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
{ USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
+ /* ELSA MicroLink 56K */
+ { USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
{ USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index f66c9413..31749c7 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -537,6 +537,7 @@ struct dwc2_core_params {
* 2 - Internal DMA
* @power_optimized Are power optimizations enabled?
* @num_dev_ep Number of device endpoints available
+ * @num_dev_in_eps Number of device IN endpoints available
* @num_dev_perio_in_ep Number of device periodic IN endpoints
* available
* @dev_token_q_depth Device Mode IN Token Sequence Learning Queue
@@ -565,6 +566,7 @@ struct dwc2_core_params {
* 2 - 8 or 16 bits
* @snpsid: Value from SNPSID register
* @dev_ep_dirs: Direction of device endpoints (GHWCFG1)
+ * @g_tx_fifo_size[] Power-on values of TxFIFO sizes
*/
struct dwc2_hw_params {
unsigned op_mode:3;
@@ -586,12 +588,14 @@ struct dwc2_hw_params {
unsigned fs_phy_type:2;
unsigned i2c_enable:1;
unsigned num_dev_ep:4;
+ unsigned num_dev_in_eps:4;
unsigned num_dev_perio_in_ep:4;
unsigned total_fifo_size:16;
unsigned power_optimized:1;
unsigned utmi_phy_data_width:2;
u32 snpsid;
u32 dev_ep_dirs;
+ u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
};
/* Size of control and EP0 buffers */
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 88529d0..e4c3ce0 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -195,55 +195,18 @@ int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
{
if (hsotg->hw_params.en_multiple_tx_fifo)
/* In dedicated FIFO mode we need count of IN EPs */
- return (dwc2_readl(hsotg->regs + GHWCFG4) &
- GHWCFG4_NUM_IN_EPS_MASK) >> GHWCFG4_NUM_IN_EPS_SHIFT;
+ return hsotg->hw_params.num_dev_in_eps;
else
/* In shared FIFO mode we need count of Periodic IN EPs */
return hsotg->hw_params.num_dev_perio_in_ep;
}
/**
- * dwc2_hsotg_ep_info_size - return Endpoint Info Control block size in DWORDs
- */
-static int dwc2_hsotg_ep_info_size(struct dwc2_hsotg *hsotg)
-{
- int val = 0;
- int i;
- u32 ep_dirs;
-
- /*
- * Don't need additional space for ep info control registers in
- * slave mode.
- */
- if (!using_dma(hsotg)) {
- dev_dbg(hsotg->dev, "Buffer DMA ep info size 0\n");
- return 0;
- }
-
- /*
- * Buffer DMA mode - 1 location per endpoit
- * Descriptor DMA mode - 4 locations per endpoint
- */
- ep_dirs = hsotg->hw_params.dev_ep_dirs;
-
- for (i = 0; i <= hsotg->hw_params.num_dev_ep; i++) {
- val += ep_dirs & 3 ? 1 : 2;
- ep_dirs >>= 2;
- }
-
- if (using_desc_dma(hsotg))
- val = val * 4;
-
- return val;
-}
-
-/**
* dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
* device mode TX FIFOs
*/
int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
{
- int ep_info_size;
int addr;
int tx_addr_max;
u32 np_tx_fifo_size;
@@ -252,8 +215,7 @@ int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
hsotg->params.g_np_tx_fifo_size);
/* Get Endpoint Info Control block size in DWORDs. */
- ep_info_size = dwc2_hsotg_ep_info_size(hsotg);
- tx_addr_max = hsotg->hw_params.total_fifo_size - ep_info_size;
+ tx_addr_max = hsotg->hw_params.total_fifo_size;
addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
if (tx_addr_max <= addr)
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index ef73af6..03fd20f 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -484,8 +484,7 @@ static void dwc2_check_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg)
}
for (fifo = 1; fifo <= fifo_count; fifo++) {
- dptxfszn = (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) &
- FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
+ dptxfszn = hsotg->hw_params.g_tx_fifo_size[fifo];
if (hsotg->params.g_tx_fifo_size[fifo] < min ||
hsotg->params.g_tx_fifo_size[fifo] > dptxfszn) {
@@ -609,6 +608,7 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
struct dwc2_hw_params *hw = &hsotg->hw_params;
bool forced;
u32 gnptxfsiz;
+ int fifo, fifo_count;
if (hsotg->dr_mode == USB_DR_MODE_HOST)
return;
@@ -617,6 +617,14 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
+ fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
+
+ for (fifo = 1; fifo <= fifo_count; fifo++) {
+ hw->g_tx_fifo_size[fifo] =
+ (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) &
+ FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
+ }
+
if (forced)
dwc2_clear_force_mode(hsotg);
@@ -661,14 +669,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
- /*
- * Host specific hardware parameters. Reading these parameters
- * requires the controller to be in host mode. The mode will
- * be forced, if necessary, to read these values.
- */
- dwc2_get_host_hwparams(hsotg);
- dwc2_get_dev_hwparams(hsotg);
-
/* hwcfg1 */
hw->dev_ep_dirs = hwcfg1;
@@ -711,6 +711,8 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
+ hw->num_dev_in_eps = (hwcfg4 & GHWCFG4_NUM_IN_EPS_MASK) >>
+ GHWCFG4_NUM_IN_EPS_SHIFT;
hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
@@ -719,6 +721,13 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
/* fifo sizes */
hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
GRXFSIZ_DEPTH_SHIFT;
+ /*
+ * Host specific hardware parameters. Reading these parameters
+ * requires the controller to be in host mode. The mode will
+ * be forced, if necessary, to read these values.
+ */
+ dwc2_get_host_hwparams(hsotg);
+ dwc2_get_dev_hwparams(hsotg);
return 0;
}
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index c4a4d7b..7ae0eef 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -51,8 +51,10 @@ static int dwc3_of_simple_clk_init(struct dwc3_of_simple *simple, int count)
clk = of_clk_get(np, i);
if (IS_ERR(clk)) {
- while (--i >= 0)
+ while (--i >= 0) {
+ clk_disable_unprepare(simple->clks[i]);
clk_put(simple->clks[i]);
+ }
return PTR_ERR(clk);
}
@@ -203,6 +205,7 @@ static struct platform_driver dwc3_of_simple_driver = {
.driver = {
.name = "dwc3-of-simple",
.of_match_table = of_dwc3_simple_match,
+ .pm = &dwc3_of_simple_dev_pm_ops,
},
};
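
The error path above now mirrors the init loop in reverse: every clock that was gotten and prepare-enabled must be disabled and unprepared before its reference is dropped. The unwind pattern in isolation (a sketch):

        #include <linux/clk.h>

        static void demo_unwind_clks(struct clk **clks, int i)
        {
                /* undo clk_prepare_enable(), then drop the reference */
                while (--i >= 0) {
                        clk_disable_unprepare(clks[i]);
                        clk_put(clks[i]);
                }
        }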
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 981fd98..639dd1b 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -259,7 +259,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
{
const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
- u32 timeout = 500;
+ u32 timeout = 1000;
u32 reg;
int cmd_status = 0;
@@ -912,7 +912,7 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
*/
if (speed == USB_SPEED_HIGH) {
struct usb_ep *ep = &dep->endpoint;
- unsigned int mult = ep->mult - 1;
+ unsigned int mult = 2;
unsigned int maxp = usb_endpoint_maxp(ep->desc);
if (length <= (2 * maxp))
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 0a19a76..31cce78 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -508,8 +508,8 @@ choice
controller, and the relevant drivers for each function declared
by the device.
-endchoice
-
source "drivers/usb/gadget/legacy/Kconfig"
+endchoice
+
endif # USB_GADGET
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index c5bce8e..5780fba 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -73,9 +73,7 @@ struct f_ncm {
struct sk_buff *skb_tx_ndp;
u16 ndp_dgram_count;
bool timer_force_tx;
- struct tasklet_struct tx_tasklet;
struct hrtimer task_timer;
-
bool timer_stopping;
};
@@ -1104,7 +1102,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
/* Delay the timer. */
hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
- HRTIMER_MODE_REL);
+ HRTIMER_MODE_REL_SOFT);
/* Add the datagram position entries */
ntb_ndp = skb_put_zero(ncm->skb_tx_ndp, dgram_idx_len);
@@ -1148,17 +1146,15 @@ err:
}
/*
- * This transmits the NTB if there are frames waiting.
+ * The transmit should only be run if no skb data has been sent
+ * for a certain duration.
*/
-static void ncm_tx_tasklet(unsigned long data)
+static enum hrtimer_restart ncm_tx_timeout(struct hrtimer *data)
{
- struct f_ncm *ncm = (void *)data;
-
- if (ncm->timer_stopping)
- return;
+ struct f_ncm *ncm = container_of(data, struct f_ncm, task_timer);
/* Only send if data is available. */
- if (ncm->skb_tx_data) {
+ if (!ncm->timer_stopping && ncm->skb_tx_data) {
ncm->timer_force_tx = true;
/* XXX This allowance of a NULL skb argument to ndo_start_xmit
@@ -1171,16 +1167,6 @@ static void ncm_tx_tasklet(unsigned long data)
ncm->timer_force_tx = false;
}
-}
-
-/*
- * The transmit should only be run if no skb data has been sent
- * for a certain duration.
- */
-static enum hrtimer_restart ncm_tx_timeout(struct hrtimer *data)
-{
- struct f_ncm *ncm = container_of(data, struct f_ncm, task_timer);
- tasklet_schedule(&ncm->tx_tasklet);
return HRTIMER_NORESTART;
}
@@ -1513,8 +1499,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
ncm->port.open = ncm_open;
ncm->port.close = ncm_close;
- tasklet_init(&ncm->tx_tasklet, ncm_tx_tasklet, (unsigned long) ncm);
- hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
ncm->task_timer.function = ncm_tx_timeout;
DBG(cdev, "CDC Network: %s speed IN/%s OUT/%s NOTIFY/%s\n",
@@ -1623,7 +1608,6 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
DBG(c->cdev, "ncm unbind\n");
hrtimer_cancel(&ncm->task_timer);
- tasklet_kill(&ncm->tx_tasklet);
ncm_string_defs[0].id = 0;
usb_free_all_descriptors(f);
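
Switching to HRTIMER_MODE_REL_SOFT runs the callback from softirq rather than hard-interrupt context, so it can call straight into the transmit path and the relay tasklet above becomes redundant. The bare pattern (a sketch; the 1 ms expiry is arbitrary):

        #include <linux/hrtimer.h>
        #include <linux/ktime.h>

        static enum hrtimer_restart demo_timeout(struct hrtimer *t)
        {
                /* softirq context: BH-safe work may be done directly */
                return HRTIMER_NORESTART;
        }

        static void demo_arm(struct hrtimer *timer)
        {
                hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
                timer->function = demo_timeout;
                hrtimer_start(timer, ms_to_ktime(1), HRTIMER_MODE_REL_SOFT);
        }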
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index 9570bbe..784bf86 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -13,14 +13,6 @@
# both kinds of controller can also support "USB On-the-Go" (CONFIG_USB_OTG).
#
-menuconfig USB_GADGET_LEGACY
- bool "Legacy USB Gadget Support"
- help
- Legacy USB gadgets are USB gadgets that do not use the USB gadget
- configfs interface.
-
-if USB_GADGET_LEGACY
-
config USB_ZERO
tristate "Gadget Zero (DEVELOPMENT)"
select USB_LIBCOMPOSITE
@@ -487,7 +479,7 @@ endif
# or video class gadget drivers), or specific hardware, here.
config USB_G_WEBCAM
tristate "USB Webcam Gadget"
- depends on VIDEO_DEV
+ depends on VIDEO_V4L2
select USB_LIBCOMPOSITE
select VIDEOBUF2_VMALLOC
select USB_F_UVC
@@ -498,5 +490,3 @@ config USB_G_WEBCAM
Say "y" to link the driver statically, or "m" to build a
dynamically linked module called "g_webcam".
-
-endif
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 93eff7de..1b3efb1 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1147,11 +1147,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
udc = kzalloc(sizeof(*udc), GFP_KERNEL);
if (!udc)
- goto err1;
-
- ret = device_add(&gadget->dev);
- if (ret)
- goto err2;
+ goto err_put_gadget;
device_initialize(&udc->dev);
udc->dev.release = usb_udc_release;
@@ -1160,7 +1156,11 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
udc->dev.parent = parent;
ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj));
if (ret)
- goto err3;
+ goto err_put_udc;
+
+ ret = device_add(&gadget->dev);
+ if (ret)
+ goto err_put_udc;
udc->gadget = gadget;
gadget->udc = udc;
@@ -1170,7 +1170,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
ret = device_add(&udc->dev);
if (ret)
- goto err4;
+ goto err_unlist_udc;
usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED);
udc->vbus = true;
@@ -1178,27 +1178,25 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
/* pick up one of pending gadget drivers */
ret = check_pending_gadget_drivers(udc);
if (ret)
- goto err5;
+ goto err_del_udc;
mutex_unlock(&udc_lock);
return 0;
-err5:
+ err_del_udc:
device_del(&udc->dev);
-err4:
+ err_unlist_udc:
list_del(&udc->list);
mutex_unlock(&udc_lock);
-err3:
- put_device(&udc->dev);
device_del(&gadget->dev);
-err2:
- kfree(udc);
+ err_put_udc:
+ put_device(&udc->dev);
-err1:
+ err_put_gadget:
put_device(&gadget->dev);
return ret;
}
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index 4f7895d..e26e685 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -162,7 +162,7 @@ static void xhci_debugfs_extcap_regset(struct xhci_hcd *xhci, int cap_id,
static int xhci_ring_enqueue_show(struct seq_file *s, void *unused)
{
dma_addr_t dma;
- struct xhci_ring *ring = s->private;
+ struct xhci_ring *ring = *(struct xhci_ring **)s->private;
dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
seq_printf(s, "%pad\n", &dma);
@@ -173,7 +173,7 @@ static int xhci_ring_enqueue_show(struct seq_file *s, void *unused)
static int xhci_ring_dequeue_show(struct seq_file *s, void *unused)
{
dma_addr_t dma;
- struct xhci_ring *ring = s->private;
+ struct xhci_ring *ring = *(struct xhci_ring **)s->private;
dma = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
seq_printf(s, "%pad\n", &dma);
@@ -183,7 +183,7 @@ static int xhci_ring_dequeue_show(struct seq_file *s, void *unused)
static int xhci_ring_cycle_show(struct seq_file *s, void *unused)
{
- struct xhci_ring *ring = s->private;
+ struct xhci_ring *ring = *(struct xhci_ring **)s->private;
seq_printf(s, "%d\n", ring->cycle_state);
@@ -346,7 +346,7 @@ static void xhci_debugfs_create_files(struct xhci_hcd *xhci,
}
static struct dentry *xhci_debugfs_create_ring_dir(struct xhci_hcd *xhci,
- struct xhci_ring *ring,
+ struct xhci_ring **ring,
const char *name,
struct dentry *parent)
{
@@ -387,7 +387,7 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index);
epriv->root = xhci_debugfs_create_ring_dir(xhci,
- dev->eps[ep_index].new_ring,
+ &dev->eps[ep_index].new_ring,
epriv->name,
spriv->root);
spriv->eps[ep_index] = epriv;
@@ -423,7 +423,7 @@ void xhci_debugfs_create_slot(struct xhci_hcd *xhci, int slot_id)
priv->dev = dev;
dev->debugfs_private = priv;
- xhci_debugfs_create_ring_dir(xhci, dev->eps[0].ring,
+ xhci_debugfs_create_ring_dir(xhci, &dev->eps[0].ring,
"ep00", priv->root);
xhci_debugfs_create_context_files(xhci, priv->root, slot_id);
@@ -488,11 +488,11 @@ void xhci_debugfs_init(struct xhci_hcd *xhci)
ARRAY_SIZE(xhci_extcap_dbc),
"reg-ext-dbc");
- xhci_debugfs_create_ring_dir(xhci, xhci->cmd_ring,
+ xhci_debugfs_create_ring_dir(xhci, &xhci->cmd_ring,
"command-ring",
xhci->debugfs_root);
- xhci_debugfs_create_ring_dir(xhci, xhci->event_ring,
+ xhci_debugfs_create_ring_dir(xhci, &xhci->event_ring,
"event-ring",
xhci->debugfs_root);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 15f7d42..3a29b32 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -971,10 +971,9 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
return 0;
}
- xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
- if (!xhci->devs[slot_id])
+ dev = kzalloc(sizeof(*dev), flags);
+ if (!dev)
return 0;
- dev = xhci->devs[slot_id];
/* Allocate the (output) device context that will be used in the HC. */
dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
@@ -1015,9 +1014,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
trace_xhci_alloc_virt_device(dev);
+ xhci->devs[slot_id] = dev;
+
return 1;
fail:
- xhci_free_virt_device(xhci, slot_id);
+
+ if (dev->in_ctx)
+ xhci_free_container_ctx(xhci, dev->in_ctx);
+ if (dev->out_ctx)
+ xhci_free_container_ctx(xhci, dev->out_ctx);
+ kfree(dev);
+
return 0;
}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 7ef1274..1aad89b 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -178,6 +178,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_BROKEN_STREAMS;
}
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ pdev->device == 0x0014)
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
pdev->device == 0x0015)
xhci->quirks |= XHCI_RESET_ON_RESUME;
if (pdev->vendor == PCI_VENDOR_ID_VIA)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6eb87c6..c5cbc68 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3112,7 +3112,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
{
u32 maxp, total_packet_count;
- /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */
+ /* MTK xHCI 0.96 contains some features from 1.0 */
if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
return ((td_total_len - transferred) >> 10);
@@ -3121,8 +3121,8 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
trb_buff_len == td_total_len)
return 0;
- /* for MTK xHCI, TD size doesn't include this TRB */
- if (xhci->quirks & XHCI_MTK_HOST)
+ /* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */
+ if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
trb_buff_len = 0;
maxp = usb_endpoint_maxp(&urb->ep->desc);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2424d30..da6dbe3 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3525,8 +3525,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_slot_ctx *slot_ctx;
int i, ret;
- xhci_debugfs_remove_slot(xhci, udev->slot_id);
-
#ifndef CONFIG_USB_DEFAULT_PERSIST
/*
* We called pm_runtime_get_noresume when the device was attached.
@@ -3555,8 +3553,10 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
}
ret = xhci_disable_slot(xhci, udev->slot_id);
- if (ret)
+ if (ret) {
+ xhci_debugfs_remove_slot(xhci, udev->slot_id);
xhci_free_virt_device(xhci, udev->slot_id);
+ }
}
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index 465dbf6..f723f7b 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -279,6 +279,8 @@ static int usb3503_probe(struct usb3503 *hub)
if (gpio_is_valid(hub->gpio_reset)) {
err = devm_gpio_request_one(dev, hub->gpio_reset,
GPIOF_OUT_INIT_LOW, "usb3503 reset");
+ /* Datasheet defines a hardware reset to be at least 100us */
+ usleep_range(100, 10000);
if (err) {
dev_err(dev,
"unable to request GPIO %d as reset pin (%d)\n",
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index f6ae753..f932f40 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1004,7 +1004,9 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg
break;
case MON_IOCQ_RING_SIZE:
+ mutex_lock(&rp->fetch_lock);
ret = rp->b_size;
+ mutex_unlock(&rp->fetch_lock);
break;
case MON_IOCT_RING_SIZE:
@@ -1231,12 +1233,16 @@ static int mon_bin_vma_fault(struct vm_fault *vmf)
unsigned long offset, chunk_idx;
struct page *pageptr;
+ mutex_lock(&rp->fetch_lock);
offset = vmf->pgoff << PAGE_SHIFT;
- if (offset >= rp->b_size)
+ if (offset >= rp->b_size) {
+ mutex_unlock(&rp->fetch_lock);
return VM_FAULT_SIGBUS;
+ }
chunk_idx = offset / CHUNK_SIZE;
pageptr = rp->b_vec[chunk_idx].pg;
get_page(pageptr);
+ mutex_unlock(&rp->fetch_lock);
vmf->page = pageptr;
return 0;
}
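
Both hunks close the same race: the ring can be reallocated through the MON_IOCT_RING_SIZE ioctl, so b_size and b_vec may only be read under fetch_lock, and a faulted page is pinned before the lock drops. The generic shape of the fix (a sketch with hypothetical types):

        #include <linux/mm.h>
        #include <linux/mutex.h>

        struct demo_ring {
                struct mutex lock;      /* guards size/vec against resize */
                unsigned long size;     /* bytes */
                struct page **vec;
        };

        static struct page *demo_fault_page(struct demo_ring *r,
                                            unsigned long offset)
        {
                struct page *pg = NULL;

                mutex_lock(&r->lock);
                if (offset < r->size) {
                        pg = r->vec[offset >> PAGE_SHIFT];
                        get_page(pg);   /* pin before unlocking */
                }
                mutex_unlock(&r->lock);
                return pg;
        }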
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 0397606..6c036de 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -284,7 +284,15 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
portstate(musb->port1_status |= USB_PORT_STAT_POWER);
del_timer(&musb->dev_timer);
- } else {
+ } else if (!(musb->int_usb & MUSB_INTR_BABBLE)) {
+ /*
+ * When babble condition happens, drvvbus interrupt
+ * is also generated. Ignore this drvvbus interrupt
+ * and let the babble interrupt handler recover the
+ * controller; otherwise, the host-mode flag is lost
+ * due to the MUSB_DEV_MODE() call below and babble
+ * recovery logic will not be called.
+ */
musb->is_active = 0;
MUSB_DEV_MODE(musb);
otg->default_a = 0;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 7c6273b..06d502b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+ { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -174,6 +175,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ { USB_DEVICE(0x18EF, 0xE030) }, /* ELV ALC 8xxx Battery Charger */
{ USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1aba910..fc68952 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1013,6 +1013,7 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
+ { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4faa09f..8b4ecd2 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -915,6 +915,12 @@
#define ICPDAS_I7563U_PID 0x0105
/*
+ * Airbus Defence and Space
+ */
+#define AIRBUS_DS_VID 0x1e8e /* Vendor ID */
+#define AIRBUS_DS_P8GR 0x6001 /* Tetra P8GR */
+
+/*
* RT Systems programming cables for various ham radios
*/
#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 3b35138..b6320e3 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -233,6 +233,8 @@ static void option_instat_callback(struct urb *urb);
/* These Quectel products use Qualcomm's vendor ID */
#define QUECTEL_PRODUCT_UC20 0x9003
#define QUECTEL_PRODUCT_UC15 0x9090
+/* These Yuga products use Qualcomm's vendor ID */
+#define YUGA_PRODUCT_CLM920_NC5 0x9625
#define QUECTEL_VENDOR_ID 0x2c7c
/* These Quectel products use Quectel's vendor ID */
@@ -280,6 +282,7 @@ static void option_instat_callback(struct urb *urb);
#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
#define TELIT_PRODUCT_ME910 0x1100
+#define TELIT_PRODUCT_ME910_DUAL_MODEM 0x1101
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
#define TELIT_PRODUCT_LE910_USBCFG4 0x1206
@@ -645,6 +648,11 @@ static const struct option_blacklist_info telit_me910_blacklist = {
.reserved = BIT(1) | BIT(3),
};
+static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(3),
+};
+
static const struct option_blacklist_info telit_le910_blacklist = {
.sendsetup = BIT(0),
.reserved = BIT(1) | BIT(2),
@@ -674,6 +682,10 @@ static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
.reserved = BIT(4) | BIT(5),
};
+static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
+ .reserved = BIT(1) | BIT(4),
+};
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1178,6 +1190,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ /* Yuga products use Qualcomm vendor ID */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
+ .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist },
/* Quectel products using Quectel vendor ID */
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
@@ -1244,6 +1259,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = (kernel_ulong_t)&telit_me910_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+ .driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index e389254..613f91a 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
{DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */
{DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */
+ {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */
+ {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */
{DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
@@ -342,6 +344,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
break;
case 2:
dev_dbg(dev, "NMEA GPS interface found\n");
+ sendsetup = true;
break;
case 3:
dev_dbg(dev, "Modem port found\n");
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 2968046..f72d045 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2100,6 +2100,13 @@ UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA ),
+/* Reported by David Kozub <zub@linux.fjfi.cvut.cz> */
+UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
+ "JMicron",
+ "JMS567",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BROKEN_FUA),
+
/*
* Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
* JMicron responds to USN and several other SCSI ioctls with a
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index d520374..a7d08ae 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -129,6 +129,13 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
+/* Reported-by: David Kozub <zub@linux.fjfi.cvut.cz> */
+UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
+ "JMicron",
+ "JMS567",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BROKEN_FUA),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
"VIA",
@@ -136,6 +143,13 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: Icenowy Zheng <icenowy@aosc.io> */
+UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999,
+ "Norelsys",
+ "NS1068X",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: Takeo Nakayama <javhera@gmx.com> */
UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
"JMicron",
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index a3df8ee..e31a6f2 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -149,8 +149,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
* step 1?
*/
if (ud->tcp_socket) {
- dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n",
- ud->tcp_socket);
+ dev_dbg(&sdev->udev->dev, "shutdown sockfd %d\n", ud->sockfd);
kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
}
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 4f48b30..c31c840 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -237,11 +237,12 @@ void stub_device_cleanup_urbs(struct stub_device *sdev)
struct stub_priv *priv;
struct urb *urb;
- dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev);
+ dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n");
while ((priv = stub_priv_pop(sdev))) {
urb = priv->urb;
- dev_dbg(&sdev->udev->dev, "free urb %p\n", urb);
+ dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n",
+ priv->seqnum);
usb_kill_urb(urb);
kmem_cache_free(stub_priv_cache, priv);
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 536e037..6c5a5931 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -211,9 +211,6 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
continue;
- dev_info(&priv->urb->dev->dev, "unlink urb %p\n",
- priv->urb);
-
/*
* This matched urb is not completed yet (i.e., be in
* flight in usb hcd hardware/driver). Now we are
@@ -252,8 +249,8 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
ret = usb_unlink_urb(priv->urb);
if (ret != -EINPROGRESS)
dev_err(&priv->urb->dev->dev,
- "failed to unlink a urb %p, ret %d\n",
- priv->urb, ret);
+ "failed to unlink a urb # %lu, ret %d\n",
+ priv->seqnum, ret);
return 0;
}
@@ -322,23 +319,26 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
return priv;
}
-static int get_pipe(struct stub_device *sdev, int epnum, int dir)
+static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
{
struct usb_device *udev = sdev->udev;
struct usb_host_endpoint *ep;
struct usb_endpoint_descriptor *epd = NULL;
+ int epnum = pdu->base.ep;
+ int dir = pdu->base.direction;
+
+ if (epnum < 0 || epnum > 15)
+ goto err_ret;
if (dir == USBIP_DIR_IN)
ep = udev->ep_in[epnum & 0x7f];
else
ep = udev->ep_out[epnum & 0x7f];
- if (!ep) {
- dev_err(&sdev->udev->dev, "no such endpoint?, %d\n",
- epnum);
- BUG();
- }
+ if (!ep)
+ goto err_ret;
epd = &ep->desc;
+
if (usb_endpoint_xfer_control(epd)) {
if (dir == USBIP_DIR_OUT)
return usb_sndctrlpipe(udev, epnum);
@@ -361,15 +361,31 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
}
if (usb_endpoint_xfer_isoc(epd)) {
+ /* validate packet size and number of packets */
+ unsigned int maxp, packets, bytes;
+
+ maxp = usb_endpoint_maxp(epd);
+ maxp *= usb_endpoint_maxp_mult(epd);
+ bytes = pdu->u.cmd_submit.transfer_buffer_length;
+ packets = DIV_ROUND_UP(bytes, maxp);
+
+ if (pdu->u.cmd_submit.number_of_packets < 0 ||
+ pdu->u.cmd_submit.number_of_packets > packets) {
+ dev_err(&sdev->udev->dev,
+ "CMD_SUBMIT: isoc invalid num packets %d\n",
+ pdu->u.cmd_submit.number_of_packets);
+ return -1;
+ }
if (dir == USBIP_DIR_OUT)
return usb_sndisocpipe(udev, epnum);
else
return usb_rcvisocpipe(udev, epnum);
}
+err_ret:
/* NOT REACHED */
- dev_err(&sdev->udev->dev, "get pipe, epnum %d\n", epnum);
- return 0;
+ dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
+ return -1;
}
static void masking_bogus_flags(struct urb *urb)
@@ -433,7 +449,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
struct stub_priv *priv;
struct usbip_device *ud = &sdev->ud;
struct usb_device *udev = sdev->udev;
- int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
+ int pipe = get_pipe(sdev, pdu);
+
+ if (pipe == -1)
+ return;
priv = stub_priv_alloc(sdev, pdu);
if (!priv)
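
The isochronous bound enforced above: a transfer of bytes over an endpoint whose effective packet size is maxp * mult can never need more than DIV_ROUND_UP(bytes, maxp * mult) packets, so any larger number_of_packets from the wire is rejected before URB submission. The arithmetic stand-alone (a sketch; the values in the comment are hypothetical):

        #include <linux/kernel.h>
        #include <linux/usb/ch9.h>

        static bool demo_isoc_packets_ok(const struct usb_endpoint_descriptor *epd,
                                         unsigned int bytes, int num_packets)
        {
                unsigned int maxp = usb_endpoint_maxp(epd) *
                                    usb_endpoint_maxp_mult(epd);

                /* e.g. 1024 * 3 = 3072; 8000 bytes -> at most 3 packets */
                return num_packets >= 0 &&
                       num_packets <= DIV_ROUND_UP(bytes, maxp);
        }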
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index b18bce9..f0ec41a 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -88,7 +88,7 @@ void stub_complete(struct urb *urb)
/* link a urb to the queue of tx. */
spin_lock_irqsave(&sdev->priv_lock, flags);
if (sdev->ud.tcp_socket == NULL) {
- usbip_dbg_stub_tx("ignore urb for closed connection %p", urb);
+ usbip_dbg_stub_tx("ignore urb for closed connection\n");
/* It will be freed in stub_device_cleanup_urbs(). */
} else if (priv->unlinking) {
stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
@@ -167,6 +167,13 @@ static int stub_send_ret_submit(struct stub_device *sdev)
memset(&pdu_header, 0, sizeof(pdu_header));
memset(&msg, 0, sizeof(msg));
+ if (urb->actual_length > 0 && !urb->transfer_buffer) {
+ dev_err(&sdev->udev->dev,
+ "urb: actual_length %d transfer_buffer null\n",
+ urb->actual_length);
+ return -1;
+ }
+
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
iovnum = 2 + urb->number_of_packets;
else
@@ -183,8 +190,8 @@ static int stub_send_ret_submit(struct stub_device *sdev)
/* 1. setup usbip_header */
setup_ret_submit_pdu(&pdu_header, urb);
- usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
- pdu_header.base.seqnum, urb);
+ usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
+ pdu_header.base.seqnum);
usbip_header_correct_endian(&pdu_header, 1);
iov[iovnum].iov_base = &pdu_header;
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index f797893..ee2bbce 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -91,7 +91,7 @@ static void usbip_dump_usb_device(struct usb_device *udev)
dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)",
udev->devnum, udev->devpath, usb_speed_string(udev->speed));
- pr_debug("tt %p, ttport %d\n", udev->tt, udev->ttport);
+ pr_debug("tt hub ttport %d\n", udev->ttport);
dev_dbg(dev, " ");
for (i = 0; i < 16; i++)
@@ -124,12 +124,8 @@ static void usbip_dump_usb_device(struct usb_device *udev)
}
pr_debug("\n");
- dev_dbg(dev, "parent %p, bus %p\n", udev->parent, udev->bus);
-
- dev_dbg(dev,
- "descriptor %p, config %p, actconfig %p, rawdescriptors %p\n",
- &udev->descriptor, udev->config,
- udev->actconfig, udev->rawdescriptors);
+ dev_dbg(dev, "parent %s, bus %s\n", dev_name(&udev->parent->dev),
+ udev->bus->bus_name);
dev_dbg(dev, "have_langid %d, string_langid %d\n",
udev->have_langid, udev->string_langid);
@@ -237,9 +233,6 @@ void usbip_dump_urb(struct urb *urb)
dev = &urb->dev->dev;
- dev_dbg(dev, " urb :%p\n", urb);
- dev_dbg(dev, " dev :%p\n", urb->dev);
-
usbip_dump_usb_device(urb->dev);
dev_dbg(dev, " pipe :%08x ", urb->pipe);
@@ -248,11 +241,9 @@ void usbip_dump_urb(struct urb *urb)
dev_dbg(dev, " status :%d\n", urb->status);
dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags);
- dev_dbg(dev, " transfer_buffer :%p\n", urb->transfer_buffer);
dev_dbg(dev, " transfer_buffer_length:%d\n",
urb->transfer_buffer_length);
dev_dbg(dev, " actual_length :%d\n", urb->actual_length);
- dev_dbg(dev, " setup_packet :%p\n", urb->setup_packet);
if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL)
usbip_dump_usb_ctrlrequest(
@@ -262,8 +253,6 @@ void usbip_dump_urb(struct urb *urb)
dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets);
dev_dbg(dev, " interval :%d\n", urb->interval);
dev_dbg(dev, " error_count :%d\n", urb->error_count);
- dev_dbg(dev, " context :%p\n", urb->context);
- dev_dbg(dev, " complete :%p\n", urb->complete);
}
EXPORT_SYMBOL_GPL(usbip_dump_urb);
@@ -317,26 +306,20 @@ int usbip_recv(struct socket *sock, void *buf, int size)
struct msghdr msg = {.msg_flags = MSG_NOSIGNAL};
int total = 0;
+ if (!sock || !buf || !size)
+ return -EINVAL;
+
iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, &iov, 1, size);
usbip_dbg_xmit("enter\n");
- if (!sock || !buf || !size) {
- pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf,
- size);
- return -EINVAL;
- }
-
do {
- int sz = msg_data_left(&msg);
+ msg_data_left(&msg);
sock->sk->sk_allocation = GFP_NOIO;
result = sock_recvmsg(sock, &msg, MSG_WAITALL);
- if (result <= 0) {
- pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
- sock, buf + total, sz, result, total);
+ if (result <= 0)
goto err;
- }
total += result;
} while (msg_data_left(&msg));
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index e5de35c..473fb8a 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -256,6 +256,7 @@ struct usbip_device {
/* lock for status */
spinlock_t lock;
+ int sockfd;
struct socket *tcp_socket;
struct task_struct *tcp_rx;
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 6b3278c..c3e1008 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -656,9 +656,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
struct vhci_device *vdev;
unsigned long flags;
- usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
- hcd, urb, mem_flags);
-
if (portnum > VHCI_HC_PORTS) {
pr_err("invalid port number %d\n", portnum);
return -ENODEV;
@@ -822,8 +819,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
struct vhci_device *vdev;
unsigned long flags;
- pr_info("dequeue a urb %p\n", urb);
-
spin_lock_irqsave(&vhci->lock, flags);
priv = urb->hcpriv;
@@ -851,7 +846,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
/* tcp connection is closed */
spin_lock(&vdev->priv_lock);
- pr_info("device %p seems to be disconnected\n", vdev);
list_del(&priv->list);
kfree(priv);
urb->hcpriv = NULL;
@@ -863,8 +857,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
* vhci_rx will receive RET_UNLINK and give back the URB.
* Otherwise, we give back it here.
*/
- pr_info("gives back urb %p\n", urb);
-
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&vhci->lock, flags);
@@ -892,8 +884,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
unlink->unlink_seqnum = priv->seqnum;
- pr_info("device %p seems to be still connected\n", vdev);
-
/* send cmd_unlink and try to cancel the pending URB in the
* peer */
list_add_tail(&unlink->list, &vdev->unlink_tx);
@@ -975,7 +965,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
/* need this? see stub_dev.c */
if (ud->tcp_socket) {
- pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket);
+ pr_debug("shutdown tcp_socket %d\n", ud->sockfd);
kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
}
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index 90577e8..112ebb9 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -23,24 +23,23 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum)
urb = priv->urb;
status = urb->status;
- usbip_dbg_vhci_rx("find urb %p vurb %p seqnum %u\n",
- urb, priv, seqnum);
+ usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum);
switch (status) {
case -ENOENT:
/* fall through */
case -ECONNRESET:
- dev_info(&urb->dev->dev,
- "urb %p was unlinked %ssynchronuously.\n", urb,
- status == -ENOENT ? "" : "a");
+ dev_dbg(&urb->dev->dev,
+ "urb seq# %u was unlinked %ssynchronuously\n",
+ seqnum, status == -ENOENT ? "" : "a");
break;
case -EINPROGRESS:
/* no info output */
break;
default:
- dev_info(&urb->dev->dev,
- "urb %p may be in a error, status %d\n", urb,
- status);
+ dev_dbg(&urb->dev->dev,
+ "urb seq# %u may be in a error, status %d\n",
+ seqnum, status);
}
list_del(&priv->list);
@@ -67,8 +66,8 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
spin_unlock_irqrestore(&vdev->priv_lock, flags);
if (!urb) {
- pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
- pr_info("max seqnum %d\n",
+ pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
+ pdu->base.seqnum,
atomic_read(&vhci_hcd->seqnum));
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
return;
@@ -91,7 +90,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
if (usbip_dbg_flag_vhci_rx)
usbip_dump_urb(urb);
- usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
+ usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
spin_lock_irqsave(&vhci->lock, flags);
usb_hcd_unlink_urb_from_ep(vhci_hcd_to_hcd(vhci_hcd), urb);
@@ -158,7 +157,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
pr_info("the urb (seqnum %d) was already given back\n",
pdu->base.seqnum);
} else {
- usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
+ usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum);
/* If unlink is successful, status is -ECONNRESET */
urb->status = pdu->u.ret_unlink.status;
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index e78f747..091f76b 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -17,15 +17,20 @@
/*
* output example:
- * hub port sta spd dev socket local_busid
- * hs 0000 004 000 00000000 c5a7bb80 1-2.3
+ * hub port sta spd dev sockfd local_busid
+ * hs 0000 004 000 00000000 3 1-2.3
* ................................................
- * ss 0008 004 000 00000000 d8cee980 2-3.4
+ * ss 0008 004 000 00000000 4 2-3.4
* ................................................
*
- * IP address can be retrieved from a socket pointer address by looking
- * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
- * port number and its peer IP address.
+ * Output includes the socket fd instead of the socket pointer address
+ * to avoid leaking kernel memory addresses in
+ * /sys/devices/platform/vhci_hcd.0/status and in debug output.
+ * The socket pointer address is not used at the moment; it was made
+ * visible as a convenient way to find the IP address from the socket
+ * pointer address by looking up /proc/net/{tcp,tcp6}. As this opens
+ * a security hole, the change is made to use sockfd instead.
+ *
*/
static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vdev)
{
@@ -39,8 +44,8 @@ static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vd
if (vdev->ud.status == VDEV_ST_USED) {
*out += sprintf(*out, "%03u %08x ",
vdev->speed, vdev->devid);
- *out += sprintf(*out, "%16p %s",
- vdev->ud.tcp_socket,
+ *out += sprintf(*out, "%u %s",
+ vdev->ud.sockfd,
dev_name(&vdev->udev->dev));
} else {
@@ -160,7 +165,8 @@ static ssize_t nports_show(struct device *dev, struct device_attribute *attr,
char *s = out;
/*
- * Half the ports are for SPEED_HIGH and half for SPEED_SUPER, thus the * 2.
+ * Half the ports are for SPEED_HIGH and half for SPEED_SUPER,
+ * thus the * 2.
*/
out += sprintf(out, "%d\n", VHCI_PORTS * vhci_num_controllers);
return out - s;
@@ -366,6 +372,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
vdev->devid = devid;
vdev->speed = speed;
+ vdev->ud.sockfd = sockfd;
vdev->ud.tcp_socket = socket;
vdev->ud.status = VDEV_ST_NOTASSIGNED;
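
Storing the fd at attach time is what makes the status output useful again: an fd is meaningful to the userspace that passed it in, while a kernel pointer is meaningful mostly to an attacker. A sketch of the attach pairing, assuming the usual sockfd_lookup() pattern usbip already uses:

    int err;
    struct socket *socket = sockfd_lookup(sockfd, &err);

    if (!socket)
            return -EINVAL;

    vdev->ud.sockfd     = sockfd;   /* reported via sysfs status */
    vdev->ud.tcp_socket = socket;   /* never printed */
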
diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c
index d625a2f..9aed15a 100644
--- a/drivers/usb/usbip/vhci_tx.c
+++ b/drivers/usb/usbip/vhci_tx.c
@@ -69,7 +69,8 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
memset(&msg, 0, sizeof(msg));
memset(&iov, 0, sizeof(iov));
- usbip_dbg_vhci_tx("setup txdata urb %p\n", urb);
+ usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n",
+ priv->seqnum);
/* 1. setup usbip_header */
setup_cmd_submit_pdu(&pdu_header, urb);
diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c
index df1e309..1e8a23d 100644
--- a/drivers/usb/usbip/vudc_rx.c
+++ b/drivers/usb/usbip/vudc_rx.c
@@ -120,6 +120,25 @@ static int v_recv_cmd_submit(struct vudc *udc,
urb_p->new = 1;
urb_p->seqnum = pdu->base.seqnum;
+ if (urb_p->ep->type == USB_ENDPOINT_XFER_ISOC) {
+ /* validate packet size and number of packets */
+ unsigned int maxp, packets, bytes;
+
+ maxp = usb_endpoint_maxp(urb_p->ep->desc);
+ maxp *= usb_endpoint_maxp_mult(urb_p->ep->desc);
+ bytes = pdu->u.cmd_submit.transfer_buffer_length;
+ packets = DIV_ROUND_UP(bytes, maxp);
+
+ if (pdu->u.cmd_submit.number_of_packets < 0 ||
+ pdu->u.cmd_submit.number_of_packets > packets) {
+ dev_err(&udc->gadget.dev,
+ "CMD_SUBMIT: isoc invalid num packets %d\n",
+ pdu->u.cmd_submit.number_of_packets);
+ ret = -EMSGSIZE;
+ goto free_urbp;
+ }
+ }
+
ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type);
if (ret) {
usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
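
The upper bound checked above is the tightest one derivable from the endpoint descriptor: wMaxPacketSize times the high-bandwidth multiplier bounds the payload of a single isochronous packet. A worked instance of the arithmetic:

    /* High-speed isoc endpoint: wMaxPacketSize 1024, multiplier 3. */
    unsigned int maxp    = 1024 * 3;                  /* 3072 bytes/packet */
    unsigned int bytes   = 6000;                      /* from the PDU      */
    unsigned int packets = DIV_ROUND_UP(bytes, maxp); /* == 2              */

    /* number_of_packets > 2 (or negative) is rejected with -EMSGSIZE. */
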
diff --git a/drivers/usb/usbip/vudc_tx.c b/drivers/usb/usbip/vudc_tx.c
index 1440ae0..3ccb17c 100644
--- a/drivers/usb/usbip/vudc_tx.c
+++ b/drivers/usb/usbip/vudc_tx.c
@@ -85,6 +85,13 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
memset(&pdu_header, 0, sizeof(pdu_header));
memset(&msg, 0, sizeof(msg));
+ if (urb->actual_length > 0 && !urb->transfer_buffer) {
+ dev_err(&udc->gadget.dev,
+ "urb: actual_length %d transfer_buffer null\n",
+ urb->actual_length);
+ return -1;
+ }
+
if (urb_p->type == USB_ENDPOINT_XFER_ISOC)
iovnum = 2 + urb->number_of_packets;
else
@@ -100,8 +107,8 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
/* 1. setup usbip_header */
setup_ret_submit_pdu(&pdu_header, urb_p);
- usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
- pdu_header.base.seqnum, urb);
+ usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
+ pdu_header.base.seqnum);
usbip_header_correct_endian(&pdu_header, 1);
iov[iovnum].iov_base = &pdu_header;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 33ac2b1..e6bb094 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -904,7 +904,7 @@ static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
int i = 0;
for (i = 0; i < d->nvqs; ++i)
- mutex_lock(&d->vqs[i]->mutex);
+ mutex_lock_nested(&d->vqs[i]->mutex, i);
}
static void vhost_dev_unlock_vqs(struct vhost_dev *d)
@@ -1015,6 +1015,10 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
vhost_iotlb_notify_vq(dev, msg);
break;
case VHOST_IOTLB_INVALIDATE:
+ if (!dev->iotlb) {
+ ret = -EFAULT;
+ break;
+ }
vhost_vq_meta_reset(dev);
vhost_del_umem_range(dev->iotlb, msg->iova,
msg->iova + msg->size - 1);
@@ -1877,12 +1881,7 @@ static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
return -1U;
/* Check they're not leading us off end of descriptors. */
- next = vhost16_to_cpu(vq, desc->next);
- /* Make sure compiler knows to grab that: we don't want it changing! */
- /* We will use the result as an index in an array, so most
- * architectures only need a compiler barrier here. */
- read_barrier_depends();
-
+ next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
return next;
}
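
READ_ONCE() subsumes both concerns the deleted comment spelled out: it stops the compiler from re-reading or tearing the load, and it carries the address-dependency ordering that read_barrier_depends() provided on Alpha. The general shape, with hypothetical names (shared, ring, ring_size, consume):

    unsigned int idx = READ_ONCE(shared->next);  /* exactly one load */

    if (idx < ring_size)
            consume(&ring[idx]);  /* indexes with the single snapshot */
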
diff --git a/drivers/video/backlight/apple_bl.c b/drivers/video/backlight/apple_bl.c
index d843296..6a34ab9 100644
--- a/drivers/video/backlight/apple_bl.c
+++ b/drivers/video/backlight/apple_bl.c
@@ -143,7 +143,7 @@ static int apple_bl_add(struct acpi_device *dev)
struct pci_dev *host;
int intensity;
- host = pci_get_bus_and_slot(0, 0);
+ host = pci_get_domain_bus_and_slot(0, 0, 0);
if (!host) {
pr_err("unable to find PCI host\n");
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index d7c239e..f557406 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -177,7 +177,7 @@ static int corgi_ssp_lcdtg_send(struct corgi_lcd *lcd, int adrs, uint8_t data)
struct spi_message msg;
struct spi_transfer xfer = {
.len = 1,
- .cs_change = 1,
+ .cs_change = 0,
.tx_buf = lcd->buf,
};
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index eab1f84..e4bd63e 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -369,7 +369,7 @@ static int tdo24m_probe(struct spi_device *spi)
spi_message_init(m);
- x->cs_change = 1;
+ x->cs_change = 0;
x->tx_buf = &lcd->buf[0];
spi_message_add_tail(x, m);
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 6a41ea9..4dc5ee8 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -49,7 +49,7 @@ static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data)
struct spi_message msg;
struct spi_transfer xfer = {
.len = 1,
- .cs_change = 1,
+ .cs_change = 0,
.tx_buf = buf,
};
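
All three backlight hunks flip the same flag: on the final (here, only) transfer of a message, .cs_change = 1 means "keep the device selected after the message completes", which these single-transfer drivers never wanted. With the flag cleared, chip select deasserts at end of message. A sketch of the single-transfer case, assuming 'spi' and 'buf' come from the caller:

    struct spi_transfer xfer = {
            .tx_buf    = buf,
            .len       = 1,
            .cs_change = 0,  /* deassert CS when the message ends */
    };
    struct spi_message msg;

    spi_message_init(&msg);
    spi_message_add_tail(&xfer, &msg);
    spi_sync(spi, &msg);
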
diff --git a/drivers/video/fbdev/macfb.c b/drivers/video/fbdev/macfb.c
index cda7587..e707e61 100644
--- a/drivers/video/fbdev/macfb.c
+++ b/drivers/video/fbdev/macfb.c
@@ -556,7 +556,7 @@ static void __init iounmap_macfb(void)
static int __init macfb_init(void)
{
int video_cmap_len, video_is_nubus = 0;
- struct nubus_dev* ndev = NULL;
+ struct nubus_rsrc *ndev = NULL;
char *option = NULL;
int err;
@@ -670,15 +670,17 @@ static int __init macfb_init(void)
* code is really broken :-)
*/
- while ((ndev = nubus_find_type(NUBUS_CAT_DISPLAY,
- NUBUS_TYPE_VIDEO, ndev)))
- {
+ for_each_func_rsrc(ndev) {
unsigned long base = ndev->board->slot_addr;
if (mac_bi_data.videoaddr < base ||
mac_bi_data.videoaddr - base > 0xFFFFFF)
continue;
+ if (ndev->category != NUBUS_CAT_DISPLAY ||
+ ndev->type != NUBUS_TYPE_VIDEO)
+ continue;
+
video_is_nubus = 1;
slot_addr = (unsigned char *)base;
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index a9192fe..c92131e 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -522,10 +522,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
return -EBUSY;
vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
- if (!vm_dev) {
- rc = -ENOMEM;
- goto free_mem;
- }
+ if (!vm_dev)
+ return -ENOMEM;
vm_dev->vdev.dev.parent = &pdev->dev;
vm_dev->vdev.dev.release = virtio_mmio_release_dev;
@@ -535,17 +533,14 @@ static int virtio_mmio_probe(struct platform_device *pdev)
spin_lock_init(&vm_dev->lock);
vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
- if (vm_dev->base == NULL) {
- rc = -EFAULT;
- goto free_vmdev;
- }
+ if (vm_dev->base == NULL)
+ return -EFAULT;
/* Check magic value */
magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
- rc = -ENODEV;
- goto unmap;
+ return -ENODEV;
}
/* Check device version */
@@ -553,8 +548,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
if (vm_dev->version < 1 || vm_dev->version > 2) {
dev_err(&pdev->dev, "Version %ld not supported!\n",
vm_dev->version);
- rc = -ENXIO;
- goto unmap;
+ return -ENXIO;
}
vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
@@ -563,8 +557,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
* virtio-mmio device with an ID 0 is a (dummy) placeholder
* with no function. End probing now with no error reported.
*/
- rc = -ENODEV;
- goto unmap;
+ return -ENODEV;
}
vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
@@ -590,33 +583,15 @@ static int virtio_mmio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vm_dev);
rc = register_virtio_device(&vm_dev->vdev);
- if (rc) {
- iounmap(vm_dev->base);
- devm_release_mem_region(&pdev->dev, mem->start,
- resource_size(mem));
+ if (rc)
put_device(&vm_dev->vdev.dev);
- }
- return rc;
-unmap:
- iounmap(vm_dev->base);
-free_mem:
- devm_release_mem_region(&pdev->dev, mem->start,
- resource_size(mem));
-free_vmdev:
- devm_kfree(&pdev->dev, vm_dev);
+
return rc;
}
static int virtio_mmio_remove(struct platform_device *pdev)
{
struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
- struct resource *mem;
-
- iounmap(vm_dev->base);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem)
- devm_release_mem_region(&pdev->dev, mem->start,
- resource_size(mem));
unregister_virtio_device(&vm_dev->vdev);
return 0;
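
The whole virtio_mmio cleanup is a devm conversion: devm_kzalloc(), devm_ioremap() and the devm-requested region are released automatically when probe fails or the device goes away, so every goto label and the hand-rolled teardown in remove() can simply disappear. The pattern in miniature, with a hypothetical driver and private struct:

    static int demo_probe(struct platform_device *pdev)
    {
            struct demo_priv *priv;  /* hypothetical private data */

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;  /* nothing to unwind by hand */

            platform_set_drvdata(pdev, priv);
            return 0;
    }
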
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index ca200d1..5bf613d 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -223,6 +223,13 @@ config ZIIRAVE_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called ziirave_wdt.
+config RAVE_SP_WATCHDOG
+ tristate "RAVE SP Watchdog timer"
+ depends on RAVE_SP_CORE
+ select WATCHDOG_CORE
+ help
+	  Support for the watchdog on the RAVE SP device.
+
# ALPHA Architecture
# ARM Architecture
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 715a210..135c5e8 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -224,3 +224,4 @@ obj-$(CONFIG_MAX77620_WATCHDOG) += max77620_wdt.o
obj-$(CONFIG_ZIIRAVE_WATCHDOG) += ziirave_wdt.o
obj-$(CONFIG_SOFT_WATCHDOG) += softdog.o
obj-$(CONFIG_MENF21BMC_WATCHDOG) += menf21bmc_wdt.o
+obj-$(CONFIG_RAVE_SP_WATCHDOG) += rave-sp-wdt.o
diff --git a/drivers/watchdog/rave-sp-wdt.c b/drivers/watchdog/rave-sp-wdt.c
new file mode 100644
index 0000000..35db173
--- /dev/null
+++ b/drivers/watchdog/rave-sp-wdt.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Driver for the watchdog aspect of the Zodiac Inflight Innovations
+ * RAVE Supervisory Processor (SP) MCU
+ *
+ * Copyright (C) 2017 Zodiac Inflight Innovation
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rave-sp.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/watchdog.h>
+
+enum {
+ RAVE_SP_RESET_BYTE = 1,
+ RAVE_SP_RESET_REASON_NORMAL = 0,
+ RAVE_SP_RESET_DELAY_MS = 500,
+};
+
+/**
+ * struct rave_sp_wdt_variant - RAVE SP watchdog variant
+ *
+ * @max_timeout: Largest possible watchdog timeout setting
+ * @min_timeout: Smallest possible watchdog timeout setting
+ *
+ * @configure: Function to send configuration command
+ * @restart: Function to send "restart" command
+ */
+struct rave_sp_wdt_variant {
+ unsigned int max_timeout;
+ unsigned int min_timeout;
+
+ int (*configure)(struct watchdog_device *, bool);
+ int (*restart)(struct watchdog_device *);
+};
+
+/**
+ * struct rave_sp_wdt - RAVE SP watchdog
+ *
+ * @wdd: Underlying watchdog device
+ * @sp: Pointer to parent RAVE SP device
+ * @variant: Device specific variant information
+ * @reboot_notifier: Reboot notifier implementing machine reset
+ */
+struct rave_sp_wdt {
+ struct watchdog_device wdd;
+ struct rave_sp *sp;
+ const struct rave_sp_wdt_variant *variant;
+ struct notifier_block reboot_notifier;
+};
+
+static struct rave_sp_wdt *to_rave_sp_wdt(struct watchdog_device *wdd)
+{
+ return container_of(wdd, struct rave_sp_wdt, wdd);
+}
+
+static int rave_sp_wdt_exec(struct watchdog_device *wdd, void *data,
+ size_t data_size)
+{
+ return rave_sp_exec(to_rave_sp_wdt(wdd)->sp,
+ data, data_size, NULL, 0);
+}
+
+static int rave_sp_wdt_legacy_configure(struct watchdog_device *wdd, bool on)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_SW_WDT,
+ [1] = 0,
+ [2] = 0,
+ [3] = on,
+ [4] = on ? wdd->timeout : 0,
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static int rave_sp_wdt_rdu_configure(struct watchdog_device *wdd, bool on)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_SW_WDT,
+ [1] = 0,
+ [2] = on,
+ [3] = (u8)wdd->timeout,
+ [4] = (u8)(wdd->timeout >> 8),
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+/**
+ * rave_sp_wdt_configure - Configure watchdog device
+ *
+ * @wdd: Device to configure
+ * @on: Desired state of the watchdog timer (ON/OFF)
+ *
+ * This function configures two aspects of the watchdog timer:
+ *
+ * - Whether it is ON or OFF
+ * - Its timeout duration
+ *
+ * with the first aspect specified via the function argument and the
+ * second via the value of 'wdd->timeout'.
+ */
+static int rave_sp_wdt_configure(struct watchdog_device *wdd, bool on)
+{
+ return to_rave_sp_wdt(wdd)->variant->configure(wdd, on);
+}
+
+static int rave_sp_wdt_legacy_restart(struct watchdog_device *wdd)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_RESET,
+ [1] = 0,
+ [2] = RAVE_SP_RESET_BYTE
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static int rave_sp_wdt_rdu_restart(struct watchdog_device *wdd)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_RESET,
+ [1] = 0,
+ [2] = RAVE_SP_RESET_BYTE,
+ [3] = RAVE_SP_RESET_REASON_NORMAL
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static int rave_sp_wdt_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ /*
+ * Restart handler is called in atomic context which means we
+ * can't communicate with the SP via UART. Luckily for us, the SP
+ * will wait 500 ms before actually resetting us, so we ask it to
+ * do so here and let the rest of the system go on wrapping
+ * things up.
+ */
+ if (action == SYS_DOWN || action == SYS_HALT) {
+ struct rave_sp_wdt *sp_wd =
+ container_of(nb, struct rave_sp_wdt, reboot_notifier);
+
+ const int ret = sp_wd->variant->restart(&sp_wd->wdd);
+
+ if (ret < 0)
+ dev_err(sp_wd->wdd.parent,
+ "Failed to issue restart command (%d)", ret);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int rave_sp_wdt_restart(struct watchdog_device *wdd,
+ unsigned long action, void *data)
+{
+ /*
+ * The actual work was done by reboot notifier above. SP
+ * firmware waits 500 ms before issuing reset, so let's hang
+ * here for twice that delay; hopefully we never reach
+ * the return statement.
+ */
+ mdelay(2 * RAVE_SP_RESET_DELAY_MS);
+
+ return -EIO;
+}
+
+static int rave_sp_wdt_start(struct watchdog_device *wdd)
+{
+ int ret;
+
+ ret = rave_sp_wdt_configure(wdd, true);
+ if (!ret)
+ set_bit(WDOG_HW_RUNNING, &wdd->status);
+
+ return ret;
+}
+
+static int rave_sp_wdt_stop(struct watchdog_device *wdd)
+{
+ return rave_sp_wdt_configure(wdd, false);
+}
+
+static int rave_sp_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ wdd->timeout = timeout;
+
+ return rave_sp_wdt_configure(wdd, watchdog_active(wdd));
+}
+
+static int rave_sp_wdt_ping(struct watchdog_device *wdd)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_PET_WDT,
+ [1] = 0,
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static const struct watchdog_info rave_sp_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .identity = "RAVE SP Watchdog",
+};
+
+static const struct watchdog_ops rave_sp_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = rave_sp_wdt_start,
+ .stop = rave_sp_wdt_stop,
+ .ping = rave_sp_wdt_ping,
+ .set_timeout = rave_sp_wdt_set_timeout,
+ .restart = rave_sp_wdt_restart,
+};
+
+static const struct rave_sp_wdt_variant rave_sp_wdt_legacy = {
+ .max_timeout = 255,
+ .min_timeout = 1,
+ .configure = rave_sp_wdt_legacy_configure,
+ .restart = rave_sp_wdt_legacy_restart,
+};
+
+static const struct rave_sp_wdt_variant rave_sp_wdt_rdu = {
+ .max_timeout = 180,
+ .min_timeout = 60,
+ .configure = rave_sp_wdt_rdu_configure,
+ .restart = rave_sp_wdt_rdu_restart,
+};
+
+static const struct of_device_id rave_sp_wdt_of_match[] = {
+ {
+ .compatible = "zii,rave-sp-watchdog-legacy",
+ .data = &rave_sp_wdt_legacy,
+ },
+ {
+ .compatible = "zii,rave-sp-watchdog",
+ .data = &rave_sp_wdt_rdu,
+ },
+ { /* sentinel */ }
+};
+
+static int rave_sp_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct watchdog_device *wdd;
+ struct rave_sp_wdt *sp_wd;
+ struct nvmem_cell *cell;
+ __le16 timeout = 0;
+ int ret;
+
+ sp_wd = devm_kzalloc(dev, sizeof(*sp_wd), GFP_KERNEL);
+ if (!sp_wd)
+ return -ENOMEM;
+
+ sp_wd->variant = of_device_get_match_data(dev);
+ sp_wd->sp = dev_get_drvdata(dev->parent);
+
+ wdd = &sp_wd->wdd;
+ wdd->parent = dev;
+ wdd->info = &rave_sp_wdt_info;
+ wdd->ops = &rave_sp_wdt_ops;
+ wdd->min_timeout = sp_wd->variant->min_timeout;
+ wdd->max_timeout = sp_wd->variant->max_timeout;
+ wdd->status = WATCHDOG_NOWAYOUT_INIT_STATUS;
+ wdd->timeout = 60;
+
+ cell = nvmem_cell_get(dev, "wdt-timeout");
+ if (!IS_ERR(cell)) {
+ size_t len;
+ void *value = nvmem_cell_read(cell, &len);
+
+ if (!IS_ERR(value)) {
+ memcpy(&timeout, value, min(len, sizeof(timeout)));
+ kfree(value);
+ }
+ nvmem_cell_put(cell);
+ }
+ watchdog_init_timeout(wdd, le16_to_cpu(timeout), dev);
+ watchdog_set_restart_priority(wdd, 255);
+ watchdog_stop_on_unregister(wdd);
+
+ sp_wd->reboot_notifier.notifier_call = rave_sp_wdt_reboot_notifier;
+ ret = devm_register_reboot_notifier(dev, &sp_wd->reboot_notifier);
+ if (ret) {
+ dev_err(dev, "Failed to register reboot notifier\n");
+ return ret;
+ }
+
+ /*
+ * We don't know if the watchdog is running now. To be sure, let's
+ * start it and depend on the watchdog core to ping it.
+ */
+ wdd->max_hw_heartbeat_ms = wdd->max_timeout * 1000;
+ ret = rave_sp_wdt_start(wdd);
+ if (ret) {
+ dev_err(dev, "Watchdog didn't start\n");
+ return ret;
+ }
+
+ ret = devm_watchdog_register_device(dev, wdd);
+ if (ret) {
+ dev_err(dev, "Failed to register watchdog device\n");
+ rave_sp_wdt_stop(wdd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver rave_sp_wdt_driver = {
+ .probe = rave_sp_wdt_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = rave_sp_wdt_of_match,
+ },
+};
+
+module_platform_driver(rave_sp_wdt_driver);
+
+MODULE_DEVICE_TABLE(of, rave_sp_wdt_of_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Vostrikov <andrey.vostrikov@cogentembedded.com>");
+MODULE_AUTHOR("Nikita Yushchenko <nikita.yoush@cogentembedded.com>");
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_DESCRIPTION("RAVE SP Watchdog driver");
+MODULE_ALIAS("platform:rave-sp-watchdog");
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index d8dd546..e5d0c28 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -269,7 +269,7 @@ config XEN_ACPI_HOTPLUG_CPU
config XEN_ACPI_PROCESSOR
tristate "Xen ACPI processor"
- depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ
+ depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
default m
help
This ACPI processor uploads Power Management information to the Xen
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index f77e499..065f0b6 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -257,10 +257,25 @@ static void release_memory_resource(struct resource *resource)
kfree(resource);
}
+/*
+ * Host memory not allocated to dom0. We can use this range for hotplug-based
+ * ballooning.
+ *
+ * It's a type-less resource. Setting IORESOURCE_MEM will make resource
+ * management algorithms (arch_remove_reservations()) look into guest e820,
+ * which we don't want.
+ */
+static struct resource hostmem_resource = {
+ .name = "Host RAM",
+};
+
+void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res)
+{}
+
static struct resource *additional_memory_resource(phys_addr_t size)
{
- struct resource *res;
- int ret;
+ struct resource *res, *res_hostmem;
+ int ret = -ENOMEM;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
@@ -269,13 +284,42 @@ static struct resource *additional_memory_resource(phys_addr_t size)
res->name = "System RAM";
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
- ret = allocate_resource(&iomem_resource, res,
- size, 0, -1,
- PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
- if (ret < 0) {
- pr_err("Cannot allocate new System RAM resource\n");
- kfree(res);
- return NULL;
+ res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (res_hostmem) {
+ /* Try to grab a range from hostmem */
+ res_hostmem->name = "Host memory";
+ ret = allocate_resource(&hostmem_resource, res_hostmem,
+ size, 0, -1,
+ PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+ }
+
+ if (!ret) {
+ /*
+ * Insert this resource into iomem. Because hostmem_resource
+ * tracks a portion of the guest e820 marked as UNUSABLE, no one
+ * else should try to use it.
+ */
+ res->start = res_hostmem->start;
+ res->end = res_hostmem->end;
+ ret = insert_resource(&iomem_resource, res);
+ if (ret < 0) {
+ pr_err("Can't insert iomem_resource [%llx - %llx]\n",
+ res->start, res->end);
+ release_memory_resource(res_hostmem);
+ res_hostmem = NULL;
+ res->start = res->end = 0;
+ }
+ }
+
+ if (ret) {
+ ret = allocate_resource(&iomem_resource, res,
+ size, 0, -1,
+ PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+ if (ret < 0) {
+ pr_err("Cannot allocate new System RAM resource\n");
+ kfree(res);
+ return NULL;
+ }
}
#ifdef CONFIG_SPARSEMEM
@@ -287,6 +331,7 @@ static struct resource *additional_memory_resource(phys_addr_t size)
pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
pfn, limit);
release_memory_resource(res);
+ release_memory_resource(res_hostmem);
return NULL;
}
}
@@ -765,6 +810,8 @@ static int __init balloon_init(void)
set_online_page_callback(&xen_online_page);
register_memory_notifier(&xen_memory_nb);
register_sysctl_table(xen_root);
+
+ arch_xen_balloon_init(&hostmem_resource);
#endif
#ifdef CONFIG_XEN_PV
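
arch_xen_balloon_init() is deliberately a weak no-op: an architecture that knows where unusable host memory sits (x86 reads it out of the e820 map) supplies a strong definition that sizes hostmem_resource; everyone else keeps the empty default and falls back to the plain iomem_resource allocation. Shape of an override, with a hypothetical body and bounds:

    /* A strong definition in arch code overrides the weak stub above. */
    void __init arch_xen_balloon_init(struct resource *hostmem)
    {
            /* hypothetical: bounds taken from the firmware memory map */
            hostmem->start = unusable_start;
            hostmem->end   = unusable_end;
    }
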
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 57efbd3..bd56653 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -380,10 +380,8 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
}
range = 0;
while (range < pages) {
- if (map->unmap_ops[offset+range].handle == -1) {
- range--;
+ if (map->unmap_ops[offset+range].handle == -1)
break;
- }
range++;
}
err = __unmap_grant_pages(map, offset, range);
@@ -1073,8 +1071,10 @@ unlock_out:
out_unlock_put:
mutex_unlock(&priv->lock);
out_put_map:
- if (use_ptemod)
+ if (use_ptemod) {
map->vma = NULL;
+ unmap_grant_pages(map, 0, map->count);
+ }
gntdev_put_map(priv, map);
return err;
}
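
The first gntdev hunk fixes an off-by-one: 'range' has not yet counted the entry currently being examined, so decrementing it before break dropped the last mapped page from the unmap batch. A worked instance:

    /* handles[] = { 5, 7, -1 }: two mapped entries, then an unmapped one. */
    int handles[] = { 5, 7, -1 };
    int range = 0;

    while (range < 3) {
            if (handles[range] == -1)
                    break;  /* old code did "range--; break;" */
            range++;
    }
    /* range == 2 here; the old variant left range == 1, unmapping
     * one page too few. */
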
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index d1e1d8d..4c789e6 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -805,7 +805,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
pvcalls_exit();
return ret;
}
- map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
+ map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
if (map2 == NULL) {
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
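
The pvcalls change is about allocation context: map2 is allocated inside a spinlocked section of the accept path, and GFP_KERNEL may sleep there; GFP_ATOMIC is the non-sleeping variant that is legal under a spinlock. The rule in miniature, with hypothetical lock and object names:

    spin_lock(&lock);                         /* sleeping now forbidden  */
    obj = kzalloc(sizeof(*obj), GFP_ATOMIC);  /* GFP_KERNEL might sleep  */
    if (!obj) {
            spin_unlock(&lock);
            return -ENOMEM;
    }
    /* ... publish obj under the lock ... */
    spin_unlock(&lock);
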