Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 208
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 104
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 140
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 68
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 229
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 177
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 517
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h | 57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 86
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v3_1.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 222
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15_common.h | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 466
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 95
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 224
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/cgs_common.h | 270
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c | 49
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h | 39
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 96
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 410
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | 27
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 100
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h | 18
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 226
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 23
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 2
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_crtc.c | 47
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 36
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 11
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 38
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 83
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 7
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 8
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 5
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_dump.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 26
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 19
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 18
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 13
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 30
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 32
-rw-r--r--  drivers/gpu/drm/i915/gvt/render.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 69
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 122
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_pvinfo.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 75
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_aux_backlight.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_lpe_audio.c | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_lspcon.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.h | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c | 8
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 2
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 15
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 20
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 9
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 9
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.c | 10
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c | 7
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 14
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 4
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_crtc.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vga.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/object.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c | 64
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 36
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 36
-rw-r--r--  drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 12
-rw-r--r--  drivers/gpu/drm/rockchip/cdn-dp-core.c | 9
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 8
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 3
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop_reg.c | 2
-rw-r--r--  drivers/gpu/drm/sti/sti_compositor.c | 2
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c | 11
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.h | 3
-rw-r--r--  drivers/gpu/drm/tegra/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/tegra/Makefile | 4
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 281
-rw-r--r--  drivers/gpu/drm/tegra/drm.h | 15
-rw-r--r--  drivers/gpu/drm/tegra/falcon.c | 259
-rw-r--r--  drivers/gpu/drm/tegra/falcon.h | 127
-rw-r--r--  drivers/gpu/drm/tegra/fb.c | 23
-rw-r--r--  drivers/gpu/drm/tegra/gem.c | 12
-rw-r--r--  drivers/gpu/drm/tegra/vic.c | 396
-rw-r--r--  drivers/gpu/drm/tegra/vic.h | 31
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 3
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 115
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 15
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 27
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 23
224 files changed, 4622 insertions, 2931 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6a81299..833c3c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -110,6 +110,7 @@ extern int amdgpu_pos_buf_per_se;
extern int amdgpu_cntl_sb_buf_per_se;
extern int amdgpu_param_buf_per_se;
+#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
@@ -966,6 +967,8 @@ struct amdgpu_gfx_config {
unsigned mc_arb_ramcfg;
unsigned gb_addr_config;
unsigned num_rbs;
+ unsigned gs_vgt_table_depth;
+ unsigned gs_prim_buffer_depth;
uint32_t tile_mode_array[32];
uint32_t macrotile_mode_array[16];
@@ -980,6 +983,7 @@ struct amdgpu_gfx_config {
struct amdgpu_cu_info {
uint32_t number; /* total active CU number */
uint32_t ao_cu_mask;
+ uint32_t wave_front_size;
uint32_t bitmap[4][4];
};
@@ -1000,10 +1004,10 @@ struct amdgpu_ngg_buf {
};
enum {
- PRIM = 0,
- POS,
- CNTL,
- PARAM,
+ NGG_PRIM = 0,
+ NGG_POS,
+ NGG_CNTL,
+ NGG_PARAM,
NGG_BUF_MAX
};
@@ -1125,6 +1129,7 @@ struct amdgpu_job {
void *owner;
uint64_t fence_ctx; /* the fence_context this job uses */
bool vm_needs_flush;
+ bool need_pipeline_sync;
unsigned vm_id;
uint64_t vm_pd_addr;
uint32_t gds_base, gds_size;
@@ -1704,9 +1709,6 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
#define WREG32_FIELD_OFFSET(reg, offset, field, val) \
WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
-#define WREG32_FIELD15(ip, idx, reg, field, val) \
- WREG32(SOC15_REG_OFFSET(ip, idx, mm##reg), (RREG32(SOC15_REG_OFFSET(ip, idx, mm##reg)) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
-
/*
* BIOS helpers.
*/
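Review note: the enum rename above (PRIM/POS/CNTL/PARAM to NGG_PRIM/NGG_POS/NGG_CNTL/NGG_PARAM) matters because unscoped C enumerators share the global identifier namespace of every file that includes amdgpu.h, so generic names like POS collide easily. The final shape after the hunk, condensed for reference:

    /* Prefixed enumerators keep amdgpu.h safe to include anywhere;
     * NGG_BUF_MAX doubles as the length of the NGG buffer array. */
    enum {
        NGG_PRIM = 0,
        NGG_POS,
        NGG_CNTL,
        NGG_PARAM,
        NGG_BUF_MAX    /* number of NGG buffer types */
    };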
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index ad43299..1e8e112 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
adev->clock.default_dispclk / 100);
adev->clock.default_dispclk = 60000;
+ } else if (adev->clock.default_dispclk <= 60000) {
+ DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
+ adev->clock.default_dispclk / 100);
+ adev->clock.default_dispclk = 62500;
}
adev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
@@ -1727,6 +1731,12 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
{
int i;
+ /*
+ * VBIOS will check the ASIC_INIT_COMPLETE bit to decide whether
+ * the driver needs to execute ASIC_Init posting
+ */
+ adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK;
+
for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 4b9abd6..4bdda56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -26,6 +26,7 @@
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
+#include "atombios.h"
#define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t))
@@ -77,10 +78,29 @@ void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev)
{
int i;
+ /*
+ * VBIOS will check the ASIC_INIT_COMPLETE bit to decide whether
+ * the driver needs to execute ASIC_Init posting
+ */
+ adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK;
+
for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]);
}
+void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev,
+ bool hung)
+{
+ u32 tmp = RREG32(adev->bios_scratch_reg_offset + 3);
+
+ if (hung)
+ tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+ else
+ tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+ WREG32(adev->bios_scratch_reg_offset + 3, tmp);
+}
+
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
struct atom_context *ctx = adev->mode_info.atom_context;
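Review note: both scratch-register restore paths now clear ATOM_S7_ASIC_INIT_COMPLETE_MASK before writing the saved image back, so the VBIOS sees the ASIC as not yet posted and allows a driver-initiated ASIC_Init after resume or reset. The new engine-hung helper is the other half of that handshake; a hypothetical caller in a reset path (do_asic_reset() is a placeholder, not from this patch) might look like:

    /* Flag the GUI engine as hung before resetting, clear it afterwards,
     * so the VBIOS knows why the scratch state changed underneath it. */
    amdgpu_atomfirmware_scratch_regs_engine_hung(adev, true);
    r = do_asic_reset(adev);    /* placeholder for the actual reset sequence */
    amdgpu_atomfirmware_scratch_regs_engine_hung(adev, false);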
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index d0c4dcd..a2c3ebe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -28,6 +28,8 @@ bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev);
void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev);
+void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev,
+ bool hung);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index cc97eee..1beae5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -117,8 +117,13 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
}
out_cleanup:
+ /* Check the error value now; it can be overwritten during cleanup. */
+ if (r) {
+ DRM_ERROR("Error while benchmarking BO move.\n");
+ }
+
if (sobj) {
- r = amdgpu_bo_reserve(sobj, false);
+ r = amdgpu_bo_reserve(sobj, true);
if (likely(r == 0)) {
amdgpu_bo_unpin(sobj);
amdgpu_bo_unreserve(sobj);
@@ -126,17 +131,13 @@ out_cleanup:
amdgpu_bo_unref(&sobj);
}
if (dobj) {
- r = amdgpu_bo_reserve(dobj, false);
+ r = amdgpu_bo_reserve(dobj, true);
if (likely(r == 0)) {
amdgpu_bo_unpin(dobj);
amdgpu_bo_unreserve(dobj);
}
amdgpu_bo_unref(&dobj);
}
-
- if (r) {
- DRM_ERROR("Error while benchmarking BO move.\n");
- }
}
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
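Review note: two independent fixes in this hunk. First, the DRM_ERROR check moves to the top of out_cleanup because the amdgpu_bo_reserve() calls below reuse r and would silently overwrite the benchmark's failure code. Second, reserve switches from interruptible (false) to no_intr (true): a teardown path must not be abandoned halfway because a signal arrived. A generic sketch of the first pattern (hypothetical helpers, not kernel API):

    int ret = do_work();
    if (ret)
        log_error(ret);          /* report first ...                    */
    ret = release_resources();   /* ... because cleanup reuses ret here */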
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 1c7e6c2..c6dba1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -42,82 +42,6 @@ struct amdgpu_cgs_device {
struct amdgpu_device *adev = \
((struct amdgpu_cgs_device *)cgs_device)->adev
-static int amdgpu_cgs_gpu_mem_info(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
- uint64_t *mc_start, uint64_t *mc_size,
- uint64_t *mem_size)
-{
- CGS_FUNC_ADEV;
- switch(type) {
- case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
- case CGS_GPU_MEM_TYPE__VISIBLE_FB:
- *mc_start = 0;
- *mc_size = adev->mc.visible_vram_size;
- *mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
- break;
- case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
- case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
- *mc_start = adev->mc.visible_vram_size;
- *mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
- *mem_size = *mc_size;
- break;
- case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
- case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
- *mc_start = adev->mc.gtt_start;
- *mc_size = adev->mc.gtt_size;
- *mem_size = adev->mc.gtt_size - adev->gart_pin_size;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int amdgpu_cgs_gmap_kmem(struct cgs_device *cgs_device, void *kmem,
- uint64_t size,
- uint64_t min_offset, uint64_t max_offset,
- cgs_handle_t *kmem_handle, uint64_t *mcaddr)
-{
- CGS_FUNC_ADEV;
- int ret;
- struct amdgpu_bo *bo;
- struct page *kmem_page = vmalloc_to_page(kmem);
- int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
-
- struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
- ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
- AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
- if (ret)
- return ret;
- ret = amdgpu_bo_reserve(bo, false);
- if (unlikely(ret != 0))
- return ret;
-
- /* pin buffer into GTT */
- ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
- min_offset, max_offset, mcaddr);
- amdgpu_bo_unreserve(bo);
-
- *kmem_handle = (cgs_handle_t)bo;
- return ret;
-}
-
-static int amdgpu_cgs_gunmap_kmem(struct cgs_device *cgs_device, cgs_handle_t kmem_handle)
-{
- struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;
-
- if (obj) {
- int r = amdgpu_bo_reserve(obj, false);
- if (likely(r == 0)) {
- amdgpu_bo_unpin(obj);
- amdgpu_bo_unreserve(obj);
- }
- amdgpu_bo_unref(&obj);
-
- }
- return 0;
-}
-
static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
enum cgs_gpu_mem_type type,
uint64_t size, uint64_t align,
@@ -215,7 +139,7 @@ static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
if (obj) {
- int r = amdgpu_bo_reserve(obj, false);
+ int r = amdgpu_bo_reserve(obj, true);
if (likely(r == 0)) {
amdgpu_bo_kunmap(obj);
amdgpu_bo_unpin(obj);
@@ -239,7 +163,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
- r = amdgpu_bo_reserve(obj, false);
+ r = amdgpu_bo_reserve(obj, true);
if (unlikely(r != 0))
return r;
r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
@@ -252,7 +176,7 @@ static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t
{
int r;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
- r = amdgpu_bo_reserve(obj, false);
+ r = amdgpu_bo_reserve(obj, true);
if (unlikely(r != 0))
return r;
r = amdgpu_bo_unpin(obj);
@@ -265,7 +189,7 @@ static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
{
int r;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
- r = amdgpu_bo_reserve(obj, false);
+ r = amdgpu_bo_reserve(obj, true);
if (unlikely(r != 0))
return r;
r = amdgpu_bo_kmap(obj, map);
@@ -277,7 +201,7 @@ static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t
{
int r;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
- r = amdgpu_bo_reserve(obj, false);
+ r = amdgpu_bo_reserve(obj, true);
if (unlikely(r != 0))
return r;
amdgpu_bo_kunmap(obj);
@@ -349,62 +273,6 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
WARN(1, "Invalid indirect register space");
}
-static uint8_t amdgpu_cgs_read_pci_config_byte(struct cgs_device *cgs_device, unsigned addr)
-{
- CGS_FUNC_ADEV;
- uint8_t val;
- int ret = pci_read_config_byte(adev->pdev, addr, &val);
- if (WARN(ret, "pci_read_config_byte error"))
- return 0;
- return val;
-}
-
-static uint16_t amdgpu_cgs_read_pci_config_word(struct cgs_device *cgs_device, unsigned addr)
-{
- CGS_FUNC_ADEV;
- uint16_t val;
- int ret = pci_read_config_word(adev->pdev, addr, &val);
- if (WARN(ret, "pci_read_config_word error"))
- return 0;
- return val;
-}
-
-static uint32_t amdgpu_cgs_read_pci_config_dword(struct cgs_device *cgs_device,
- unsigned addr)
-{
- CGS_FUNC_ADEV;
- uint32_t val;
- int ret = pci_read_config_dword(adev->pdev, addr, &val);
- if (WARN(ret, "pci_read_config_dword error"))
- return 0;
- return val;
-}
-
-static void amdgpu_cgs_write_pci_config_byte(struct cgs_device *cgs_device, unsigned addr,
- uint8_t value)
-{
- CGS_FUNC_ADEV;
- int ret = pci_write_config_byte(adev->pdev, addr, value);
- WARN(ret, "pci_write_config_byte error");
-}
-
-static void amdgpu_cgs_write_pci_config_word(struct cgs_device *cgs_device, unsigned addr,
- uint16_t value)
-{
- CGS_FUNC_ADEV;
- int ret = pci_write_config_word(adev->pdev, addr, value);
- WARN(ret, "pci_write_config_word error");
-}
-
-static void amdgpu_cgs_write_pci_config_dword(struct cgs_device *cgs_device, unsigned addr,
- uint32_t value)
-{
- CGS_FUNC_ADEV;
- int ret = pci_write_config_dword(adev->pdev, addr, value);
- WARN(ret, "pci_write_config_dword error");
-}
-
-
static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
enum cgs_resource_type resource_type,
uint64_t size,
@@ -477,56 +345,6 @@ static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigne
adev->mode_info.atom_context, table, args);
}
-static int amdgpu_cgs_create_pm_request(struct cgs_device *cgs_device, cgs_handle_t *request)
-{
- /* TODO */
- return 0;
-}
-
-static int amdgpu_cgs_destroy_pm_request(struct cgs_device *cgs_device, cgs_handle_t request)
-{
- /* TODO */
- return 0;
-}
-
-static int amdgpu_cgs_set_pm_request(struct cgs_device *cgs_device, cgs_handle_t request,
- int active)
-{
- /* TODO */
- return 0;
-}
-
-static int amdgpu_cgs_pm_request_clock(struct cgs_device *cgs_device, cgs_handle_t request,
- enum cgs_clock clock, unsigned freq)
-{
- /* TODO */
- return 0;
-}
-
-static int amdgpu_cgs_pm_request_engine(struct cgs_device *cgs_device, cgs_handle_t request,
- enum cgs_engine engine, int powered)
-{
- /* TODO */
- return 0;
-}
-
-
-
-static int amdgpu_cgs_pm_query_clock_limits(struct cgs_device *cgs_device,
- enum cgs_clock clock,
- struct cgs_clock_limits *limits)
-{
- /* TODO */
- return 0;
-}
-
-static int amdgpu_cgs_set_camera_voltages(struct cgs_device *cgs_device, uint32_t mask,
- const uint32_t *voltages)
-{
- DRM_ERROR("not implemented");
- return -EPERM;
-}
-
struct cgs_irq_params {
unsigned src_id;
cgs_irq_source_set_func_t set;
@@ -1269,9 +1087,6 @@ static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
}
static const struct cgs_ops amdgpu_cgs_ops = {
- .gpu_mem_info = amdgpu_cgs_gpu_mem_info,
- .gmap_kmem = amdgpu_cgs_gmap_kmem,
- .gunmap_kmem = amdgpu_cgs_gunmap_kmem,
.alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
.free_gpu_mem = amdgpu_cgs_free_gpu_mem,
.gmap_gpu_mem = amdgpu_cgs_gmap_gpu_mem,
@@ -1282,23 +1097,10 @@ static const struct cgs_ops amdgpu_cgs_ops = {
.write_register = amdgpu_cgs_write_register,
.read_ind_register = amdgpu_cgs_read_ind_register,
.write_ind_register = amdgpu_cgs_write_ind_register,
- .read_pci_config_byte = amdgpu_cgs_read_pci_config_byte,
- .read_pci_config_word = amdgpu_cgs_read_pci_config_word,
- .read_pci_config_dword = amdgpu_cgs_read_pci_config_dword,
- .write_pci_config_byte = amdgpu_cgs_write_pci_config_byte,
- .write_pci_config_word = amdgpu_cgs_write_pci_config_word,
- .write_pci_config_dword = amdgpu_cgs_write_pci_config_dword,
.get_pci_resource = amdgpu_cgs_get_pci_resource,
.atom_get_data_table = amdgpu_cgs_atom_get_data_table,
.atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
.atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
- .create_pm_request = amdgpu_cgs_create_pm_request,
- .destroy_pm_request = amdgpu_cgs_destroy_pm_request,
- .set_pm_request = amdgpu_cgs_set_pm_request,
- .pm_request_clock = amdgpu_cgs_pm_request_clock,
- .pm_request_engine = amdgpu_cgs_pm_request_engine,
- .pm_query_clock_limits = amdgpu_cgs_pm_query_clock_limits,
- .set_camera_voltages = amdgpu_cgs_set_camera_voltages,
.get_firmware_info = amdgpu_cgs_get_firmware_info,
.rel_firmware = amdgpu_cgs_rel_firmware,
.set_powergating_state = amdgpu_cgs_set_powergating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index ec71b93..4e6b950 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1074,6 +1074,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
amdgpu_job_free_resources(job);
+ amdgpu_cs_parser_fini(p, 0, true);
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
@@ -1129,7 +1130,10 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
goto out;
r = amdgpu_cs_submit(&parser, cs);
+ if (r)
+ goto out;
+ return 0;
out:
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index cf05006..90d1ac8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -273,6 +273,9 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
spin_lock(&ctx->ring_lock);
+ if (seq == ~0ull)
+ seq = ctx->rings[ring->idx].sequence - 1;
+
if (seq >= cring->sequence) {
spin_unlock(&ctx->ring_lock);
return ERR_PTR(-EINVAL);
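Review note: a seq of ~0ull now acts as a sentinel for "the most recent fence on this ring" (cring points at ctx->rings[ring->idx], so both lines operate on the same ring state). This is the user-visible change behind the 3.14.0 version bump in amdgpu_drv.c below. A hypothetical in-kernel caller:

    /* Ask for the latest submission on this ring rather than a
     * specific sequence number, then wait for it (sketch only). */
    struct dma_fence *fence = amdgpu_ctx_get_fence(ctx, ring, ~0ull);

    if (!IS_ERR_OR_NULL(fence)) {
        dma_fence_wait(fence, false);
        dma_fence_put(fence);
    }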
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4836607..43ca16b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -53,7 +53,6 @@
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
-#include "amdgpu_pm.h"
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
@@ -350,7 +349,7 @@ static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
if (adev->vram_scratch.robj == NULL) {
return;
}
- r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
+ r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
if (likely(r == 0)) {
amdgpu_bo_kunmap(adev->vram_scratch.robj);
amdgpu_bo_unpin(adev->vram_scratch.robj);
@@ -422,12 +421,11 @@ static int amdgpu_doorbell_init(struct amdgpu_device *adev)
if (adev->doorbell.num_doorbells == 0)
return -EINVAL;
- adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
- if (adev->doorbell.ptr == NULL) {
+ adev->doorbell.ptr = ioremap(adev->doorbell.base,
+ adev->doorbell.num_doorbells *
+ sizeof(u32));
+ if (adev->doorbell.ptr == NULL)
return -ENOMEM;
- }
- DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
- DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);
return 0;
}
@@ -1584,9 +1582,6 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
}
}
- amdgpu_dpm_enable_uvd(adev, false);
- amdgpu_dpm_enable_vce(adev, false);
-
return 0;
}
@@ -1854,7 +1849,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* mutex initialization are all done here so we
* can recall function without having locking issues */
- mutex_init(&adev->vm_manager.lock);
atomic_set(&adev->irq.ih.lock, 0);
mutex_init(&adev->firmware.mutex);
mutex_init(&adev->pm.mutex);
@@ -2071,7 +2065,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
DRM_INFO("amdgpu: finishing device.\n");
adev->shutdown = true;
- drm_crtc_force_disable_all(adev->ddev);
+ if (adev->mode_info.mode_config_initialized)
+ drm_crtc_force_disable_all(adev->ddev);
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_ib_pool_fini(adev);
@@ -2146,7 +2141,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- r = amdgpu_bo_reserve(aobj, false);
+ r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
@@ -2159,7 +2154,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
robj = gem_to_amdgpu_bo(rfb->obj);
/* don't unpin kernel fb objects */
if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
- r = amdgpu_bo_reserve(robj, false);
+ r = amdgpu_bo_reserve(robj, true);
if (r == 0) {
amdgpu_bo_unpin(robj);
amdgpu_bo_unreserve(robj);
@@ -2216,7 +2211,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
struct drm_connector *connector;
struct amdgpu_device *adev = dev->dev_private;
struct drm_crtc *crtc;
- int r;
+ int r = 0;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -2228,11 +2223,8 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
r = pci_enable_device(dev->pdev);
- if (r) {
- if (fbcon)
- console_unlock();
- return r;
- }
+ if (r)
+ goto unlock;
}
if (adev->is_atom_fw)
amdgpu_atomfirmware_scratch_regs_restore(adev);
@@ -2249,7 +2241,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
r = amdgpu_resume(adev);
if (r) {
DRM_ERROR("amdgpu_resume failed (%d).\n", r);
- return r;
+ goto unlock;
}
amdgpu_fence_driver_resume(adev);
@@ -2260,11 +2252,8 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
}
r = amdgpu_late_init(adev);
- if (r) {
- if (fbcon)
- console_unlock();
- return r;
- }
+ if (r)
+ goto unlock;
/* pin cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -2272,7 +2261,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- r = amdgpu_bo_reserve(aobj, false);
+ r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
r = amdgpu_bo_pin(aobj,
AMDGPU_GEM_DOMAIN_VRAM,
@@ -2314,12 +2303,14 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
dev->dev->power.disable_depth--;
#endif
- if (fbcon) {
+ if (fbcon)
amdgpu_fbdev_set_suspend(adev, 0);
+
+unlock:
+ if (fbcon)
console_unlock();
- }
- return 0;
+ return r;
}
static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
@@ -2430,25 +2421,37 @@ static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
uint32_t domain;
int r;
- if (!bo->shadow)
- return 0;
+ if (!bo->shadow)
+ return 0;
+
+ r = amdgpu_bo_reserve(bo, true);
+ if (r)
+ return r;
+ domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ /* if bo has been evicted, then no need to recover */
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ r = amdgpu_bo_validate(bo->shadow);
+ if (r) {
+ DRM_ERROR("bo validate failed!\n");
+ goto err;
+ }
- r = amdgpu_bo_reserve(bo, false);
- if (r)
- return r;
- domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
- /* if bo has been evicted, then no need to recover */
- if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
- r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
+ r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem);
+ if (r) {
+ DRM_ERROR("%p bind failed\n", bo->shadow);
+ goto err;
+ }
+
+ r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
NULL, fence, true);
- if (r) {
- DRM_ERROR("recover page table failed!\n");
- goto err;
- }
- }
+ if (r) {
+ DRM_ERROR("recover page table failed!\n");
+ goto err;
+ }
+ }
err:
- amdgpu_bo_unreserve(bo);
- return r;
+ amdgpu_bo_unreserve(bo);
+ return r;
}
/**
@@ -2520,6 +2523,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
ring = adev->mman.buffer_funcs_ring;
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
+ next = NULL;
amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
if (fence) {
r = dma_fence_wait(fence, false);
@@ -2593,7 +2597,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring)
+ if (!ring || !ring->sched.thread)
continue;
kthread_park(ring->sched.thread);
amd_sched_hw_job_reset(&ring->sched);
@@ -2666,6 +2670,7 @@ retry:
DRM_INFO("recover vram bo from shadow\n");
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
+ next = NULL;
amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
if (fence) {
r = dma_fence_wait(fence, false);
@@ -2688,7 +2693,8 @@ retry:
}
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring)
+
+ if (!ring || !ring->sched.thread)
continue;
amd_sched_job_recovery(&ring->sched);
@@ -2697,7 +2703,7 @@ retry:
} else {
dev_err(adev->dev, "asic resume failed (%d).\n", r);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- if (adev->rings[i]) {
+ if (adev->rings[i] && adev->rings[i]->sched.thread) {
kthread_unpark(adev->rings[i]->sched.thread);
}
}
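Review note: the reset and recovery loops now skip rings whose scheduler thread was never created (!ring->sched.thread), which protects ASICs where some rings stay uninitialized; and amdgpu_device_resume() funnels every failure through the single unlock label, so the console lock taken for fbcon is always released. Condensed from the hunks above:

    /* Only park/unpark schedulers that actually exist. */
    for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
        struct amdgpu_ring *ring = adev->rings[i];

        if (!ring || !ring->sched.thread)
            continue;    /* ring absent or scheduler never started */
        kthread_park(ring->sched.thread);
    }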
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 96926a2..cdf2ab2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -123,7 +123,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
int r;
/* unpin of the old buffer */
- r = amdgpu_bo_reserve(work->old_abo, false);
+ r = amdgpu_bo_reserve(work->old_abo, true);
if (likely(r == 0)) {
r = amdgpu_bo_unpin(work->old_abo);
if (unlikely(r != 0)) {
@@ -138,52 +138,11 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
kfree(work);
}
-
-static void amdgpu_flip_work_cleanup(struct amdgpu_flip_work *work)
-{
- int i;
-
- amdgpu_bo_unref(&work->old_abo);
- dma_fence_put(work->excl);
- for (i = 0; i < work->shared_count; ++i)
- dma_fence_put(work->shared[i]);
- kfree(work->shared);
- kfree(work);
-}
-
-static void amdgpu_flip_cleanup_unreserve(struct amdgpu_flip_work *work,
- struct amdgpu_bo *new_abo)
-{
- amdgpu_bo_unreserve(new_abo);
- amdgpu_flip_work_cleanup(work);
-}
-
-static void amdgpu_flip_cleanup_unpin(struct amdgpu_flip_work *work,
- struct amdgpu_bo *new_abo)
-{
- if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
- DRM_ERROR("failed to unpin new abo in error path\n");
- amdgpu_flip_cleanup_unreserve(work, new_abo);
-}
-
-void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work,
- struct amdgpu_bo *new_abo)
-{
- if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
- DRM_ERROR("failed to reserve new abo in error path\n");
- amdgpu_flip_work_cleanup(work);
- return;
- }
- amdgpu_flip_cleanup_unpin(work, new_abo);
-}
-
-int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags,
- uint32_t target,
- struct amdgpu_flip_work **work_p,
- struct amdgpu_bo **new_abo_p)
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags, uint32_t target,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
@@ -196,7 +155,7 @@ int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
unsigned long flags;
u64 tiling_flags;
u64 base;
- int r;
+ int i, r;
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL)
@@ -257,80 +216,41 @@ int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
r = -EBUSY;
goto pflip_cleanup;
-
}
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-
- *work_p = work;
- *new_abo_p = new_abo;
-
- return 0;
-
-pflip_cleanup:
- amdgpu_crtc_cleanup_flip_ctx(work, new_abo);
- return r;
-
-unpin:
- amdgpu_flip_cleanup_unpin(work, new_abo);
- return r;
-
-unreserve:
- amdgpu_flip_cleanup_unreserve(work, new_abo);
- return r;
-cleanup:
- amdgpu_flip_work_cleanup(work);
- return r;
-
-}
-
-void amdgpu_crtc_submit_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct amdgpu_flip_work *work,
- struct amdgpu_bo *new_abo)
-{
- unsigned long flags;
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
amdgpu_crtc->pflip_works = work;
+
+ DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
+ amdgpu_crtc->crtc_id, amdgpu_crtc, work);
/* update crtc fb */
crtc->primary->fb = fb;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-
- DRM_DEBUG_DRIVER(
- "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
- amdgpu_crtc->crtc_id, amdgpu_crtc, work);
-
amdgpu_flip_work_func(&work->flip_work.work);
-}
-
-int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags,
- uint32_t target,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct amdgpu_bo *new_abo;
- struct amdgpu_flip_work *work;
- int r;
+ return 0;
- r = amdgpu_crtc_prepare_flip(crtc,
- fb,
- event,
- page_flip_flags,
- target,
- &work,
- &new_abo);
- if (r)
- return r;
+pflip_cleanup:
+ if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
+ DRM_ERROR("failed to reserve new abo in error path\n");
+ goto cleanup;
+ }
+unpin:
+ if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
+ DRM_ERROR("failed to unpin new abo in error path\n");
+ }
+unreserve:
+ amdgpu_bo_unreserve(new_abo);
- amdgpu_crtc_submit_flip(crtc, fb, work, new_abo);
+cleanup:
+ amdgpu_bo_unref(&work->old_abo);
+ dma_fence_put(work->excl);
+ for (i = 0; i < work->shared_count; ++i)
+ dma_fence_put(work->shared[i]);
+ kfree(work->shared);
+ kfree(work);
- return 0;
+ return r;
}
int amdgpu_crtc_set_config(struct drm_mode_set *set,
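Review note: the prepare/submit/cleanup helper split is folded back into a single amdgpu_crtc_page_flip_target(), restoring the classic stacked-label unwind in which each label undoes exactly one acquisition and falls through to the next. A generic sketch of the idiom (hypothetical helpers):

    r = acquire_a();
    if (r)
        goto out;
    r = acquire_b();
    if (r)
        goto undo_a;        /* b failed: only a needs undoing */
    return 0;

undo_a:
    release_a();
out:
    return r;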
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 4e0f7d2..ab6b0d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -63,9 +63,11 @@
* - 3.11.0 - Add support for sensor query info (clocks, temp, etc).
* - 3.12.0 - Add query for double offchip LDS buffers
* - 3.13.0 - Add PRT support
+ * - 3.14.0 - Fix race in amdgpu_ctx_get_fence() and note new functionality
+ * - 3.15.0 - Export more gpu info for gfx9
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 13
+#define KMS_DRIVER_MINOR 15
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -447,13 +449,16 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
/* Vega 10 */
{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
{0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
{0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
{0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
{0, 0, 0}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index a48142d..c0d8c6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -112,7 +112,7 @@ static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
int ret;
- ret = amdgpu_bo_reserve(abo, false);
+ ret = amdgpu_bo_reserve(abo, true);
if (likely(ret == 0)) {
amdgpu_bo_kunmap(abo);
amdgpu_bo_unpin(abo);
@@ -425,10 +425,15 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
{
- struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev;
+ struct amdgpu_fbdev *afbdev;
struct drm_fb_helper *fb_helper;
int ret;
+ if (!adev)
+ return;
+
+ afbdev = adev->mode_info.rfbdev;
+
if (!afbdev)
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 6d691ab..902e601 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -27,6 +27,9 @@
*/
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
#include "amdgpu.h"
/*
@@ -183,7 +186,7 @@ void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
if (adev->gart.robj == NULL) {
return;
}
- r = amdgpu_bo_reserve(adev->gart.robj, false);
+ r = amdgpu_bo_reserve(adev->gart.robj, true);
if (likely(r == 0)) {
amdgpu_bo_kunmap(adev->gart.robj);
amdgpu_bo_unpin(adev->gart.robj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 03a9c5c..94cb91c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -139,6 +139,35 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
return 0;
}
+static int amdgpu_gem_vm_check(void *param, struct amdgpu_bo *bo)
+{
+ /* if anything is swapped out don't swap it in here,
+ just abort and wait for the next CS */
+ if (!amdgpu_bo_gpu_accessible(bo))
+ return -ERESTARTSYS;
+
+ if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
+ return -ERESTARTSYS;
+
+ return 0;
+}
+
+static bool amdgpu_gem_vm_ready(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct amdgpu_bo *bo =
+ container_of(entry->bo, struct amdgpu_bo, tbo);
+ if (amdgpu_gem_vm_check(NULL, bo))
+ return false;
+ }
+
+ return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_vm_check, NULL);
+}
+
void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
@@ -148,15 +177,13 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry vm_pd;
- struct list_head list, duplicates;
+ struct list_head list;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va;
- struct dma_fence *fence = NULL;
int r;
INIT_LIST_HEAD(&list);
- INIT_LIST_HEAD(&duplicates);
tv.bo = &bo->tbo;
tv.shared = true;
@@ -164,16 +191,18 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
return;
}
bo_va = amdgpu_vm_bo_find(vm, bo);
- if (bo_va) {
- if (--bo_va->ref_count == 0) {
- amdgpu_vm_bo_rmv(adev, bo_va);
+ if (bo_va && --bo_va->ref_count == 0) {
+ amdgpu_vm_bo_rmv(adev, bo_va);
+
+ if (amdgpu_gem_vm_ready(adev, vm, &list)) {
+ struct dma_fence *fence = NULL;
r = amdgpu_vm_clear_freed(adev, vm, &fence);
if (unlikely(r)) {
@@ -502,19 +531,6 @@ out:
return r;
}
-static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
-{
- /* if anything is swapped out don't swap it in here,
- just abort and wait for the next CS */
- if (!amdgpu_bo_gpu_accessible(bo))
- return -ERESTARTSYS;
-
- if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
- return -ERESTARTSYS;
-
- return 0;
-}
-
/**
* amdgpu_gem_va_update_vm -update the bo_va in its VM
*
@@ -533,19 +549,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct list_head *list,
uint32_t operation)
{
- struct ttm_validate_buffer *entry;
int r = -ERESTARTSYS;
- list_for_each_entry(entry, list, head) {
- struct amdgpu_bo *bo =
- container_of(entry->bo, struct amdgpu_bo, tbo);
- if (amdgpu_gem_va_check(NULL, bo))
- goto error;
- }
-
- r = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_va_check,
- NULL);
- if (r)
+ if (!amdgpu_gem_vm_ready(adev, vm, list))
goto error;
r = amdgpu_vm_update_directories(adev, vm);
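Review note: the per-BO "is anything swapped out" check, formerly amdgpu_gem_va_check(), is renamed amdgpu_gem_vm_check() and shared through amdgpu_gem_vm_ready(), so object close can call amdgpu_vm_clear_freed() immediately when every BO on the list and every page-table BO is still GPU-accessible. The callback contract is worth spelling out (simplified sketch; the real check also covers bo->shadow):

    /* amdgpu_vm_validate_pt_bos() stops at the first non-zero return;
     * -ERESTARTSYS here means "don't swap in now, retry at next CS". */
    static int check(void *param, struct amdgpu_bo *bo)
    {
        return amdgpu_bo_gpu_accessible(bo) ? 0 : -ERESTARTSYS;
    }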
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 0335c2f..f7d22c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -134,6 +134,15 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
return r;
}
+void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager *man)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
+ struct amdgpu_gtt_mgr *mgr = man->priv;
+
+ seq_printf(m, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
+ man->size, mgr->available, (u64)atomic64_read(&adev->gtt_usage) >> 20);
+
+}
/**
* amdgpu_gtt_mgr_new - allocate a new node
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index aab857d8..6e4ae0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -160,6 +160,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
return r;
}
+ if (ring->funcs->emit_pipeline_sync && job && job->need_pipeline_sync)
+ amdgpu_ring_emit_pipeline_sync(ring);
if (vm) {
r = amdgpu_vm_flush(ring, job);
@@ -217,7 +219,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vm_id)
- amdgpu_vm_reset_id(adev, job->vm_id);
+ amdgpu_vm_reset_id(adev, ring->funcs->vmhub,
+ job->vm_id);
amdgpu_ring_undo(ring);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 86a1242..7570f24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -57,6 +57,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
(*job)->vm = vm;
(*job)->ibs = (void *)&(*job)[1];
(*job)->num_ibs = num_ibs;
+ (*job)->need_pipeline_sync = false;
amdgpu_sync_create(&(*job)->sync);
@@ -139,7 +140,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync);
- if (fence == NULL && vm && !job->vm_id) {
+ while (fence == NULL && vm && !job->vm_id) {
struct amdgpu_ring *ring = job->ring;
int r;
@@ -152,6 +153,9 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
fence = amdgpu_sync_get_fence(&job->sync);
}
+ if (amd_sched_dependency_optimized(fence, sched_job->s_entity))
+ job->need_pipeline_sync = true;
+
return fence;
}
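Review note: amdgpu_job_dependency() now loops until it either finds a fence to wait on or has a vm_id in hand, instead of trying only once; and when the scheduler proves a dependency already ran on the same ring (amd_sched_dependency_optimized()), the fence wait is dropped but need_pipeline_sync is set, so the earlier amdgpu_ib.c hunk still emits an explicit pipeline sync before the VM flush:

    /* From the amdgpu_ib.c hunk above: same-ring dependencies that were
     * optimized away still require an on-ring pipeline sync. */
    if (ring->funcs->emit_pipeline_sync && job && job->need_pipeline_sync)
        amdgpu_ring_emit_pipeline_sync(ring);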
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 832be63..96c3416 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -545,11 +545,22 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
adev->gfx.config.double_offchip_lds_buf;
if (amdgpu_ngg) {
- dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[PRIM].gpu_addr;
- dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[POS].gpu_addr;
- dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[CNTL].gpu_addr;
- dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[PARAM].gpu_addr;
+ dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
+ dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
+ dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
+ dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
+ dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
+ dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
+ dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
+ dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
}
+ dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
+ dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
+ dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
+ dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
+ dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
+ dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
+ dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;
return copy_to_user(out, &dev_info,
min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
@@ -810,7 +821,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
if (amdgpu_sriov_vf(adev)) {
/* TODO: how to handle reserve failure */
- BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, false));
+ BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va);
fpriv->vm.csa_bo_va = NULL;
amdgpu_bo_unreserve(adev->virt.csa_obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index db8f8dd..dbd1061 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -597,21 +597,6 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags, uint32_t target,
struct drm_modeset_acquire_ctx *ctx);
-void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work,
- struct amdgpu_bo *new_abo);
-int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags,
- uint32_t target,
- struct amdgpu_flip_work **work,
- struct amdgpu_bo **new_abo);
-
-void amdgpu_crtc_submit_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct amdgpu_flip_work *work,
- struct amdgpu_bo *new_abo);
-
extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index cb89fff..365883d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -295,7 +295,7 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
if (*bo == NULL)
return;
- if (likely(amdgpu_bo_reserve(*bo, false) == 0)) {
+ if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
if (cpu_addr)
amdgpu_bo_kunmap(*bo);
@@ -543,6 +543,27 @@ err:
return r;
}
+int amdgpu_bo_validate(struct amdgpu_bo *bo)
+{
+ uint32_t domain;
+ int r;
+
+ if (bo->pin_count)
+ return 0;
+
+ domain = bo->prefered_domains;
+
+retry:
+ amdgpu_ttm_placement_from_domain(bo, domain);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
+ domain = bo->allowed_domains;
+ goto retry;
+ }
+
+ return r;
+}
+
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
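Review note: amdgpu_bo_validate() gives the shadow-recovery path (see the amdgpu_device.c hunk above) a way to re-place an evicted BO: it tries bo->prefered_domains first and, on -ENOMEM only, retries with the wider bo->allowed_domains; pinned BOs are left untouched. A hypothetical caller, simplified from that recovery path:

    r = amdgpu_bo_reserve(bo, true);        /* no_intr: recovery path */
    if (!r) {
        r = amdgpu_bo_validate(bo->shadow); /* may fall back to GTT   */
        amdgpu_bo_unreserve(bo);
    }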
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 15a723a..3824851 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -175,6 +175,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
struct amdgpu_bo *bo,
struct reservation_object *resv,
struct dma_fence **fence, bool direct);
+int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_bo *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 990fde2..7df503a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -867,8 +867,7 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
- /* never 0 (full-speed), fuse or smc-controlled always */
- return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
+ return sprintf(buf, "%i\n", pwm_mode);
}
static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
@@ -887,14 +886,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
if (err)
return err;
- switch (value) {
- case 1: /* manual, percent-based */
- amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
- break;
- default: /* disable */
- amdgpu_dpm_set_fan_control_mode(adev, 0);
- break;
- }
+ amdgpu_dpm_set_fan_control_mode(adev, value);
return count;
}
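Review note: pwm1_enable becomes a straight passthrough between sysfs and amdgpu_dpm_get/set_fan_control_mode(), instead of collapsing everything to "static or not". Assuming the values follow the standard hwmon convention, that exposes all three modes:

    /* hwmon pwm1_enable values, now passed through unfiltered:
     *   0 = no speed control (fan at full speed)
     *   1 = manual control (writes to pwm1 take effect)
     *   2 = automatic (firmware-controlled)
     */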
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 3826d5a..6bdc866 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -113,7 +113,7 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
int ret = 0;
- ret = amdgpu_bo_reserve(bo, false);
+ ret = amdgpu_bo_reserve(bo, true);
if (unlikely(ret != 0))
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index ed6e579..ac5e92e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -55,6 +55,8 @@ static int psp_sw_init(void *handle)
psp->bootloader_load_sos = psp_v3_1_bootloader_load_sos;
psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf;
psp->ring_init = psp_v3_1_ring_init;
+ psp->ring_create = psp_v3_1_ring_create;
+ psp->ring_destroy = psp_v3_1_ring_destroy;
psp->cmd_submit = psp_v3_1_cmd_submit;
psp->compare_sram_data = psp_v3_1_compare_sram_data;
psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
@@ -152,11 +154,6 @@ static void psp_prep_tmr_cmd_buf(struct psp_gfx_cmd_resp *cmd,
static int psp_tmr_init(struct psp_context *psp)
{
int ret;
- struct psp_gfx_cmd_resp *cmd;
-
- cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
/*
* Allocate 3M memory aligned to 1M from Frame Buffer (local
@@ -168,22 +165,30 @@ static int psp_tmr_init(struct psp_context *psp)
ret = amdgpu_bo_create_kernel(psp->adev, 0x300000, 0x100000,
AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
- if (ret)
- goto failed;
+
+ return ret;
+}
+
+static int psp_tmr_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
psp_prep_tmr_cmd_buf(cmd, psp->tmr_mc_addr, 0x300000);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr, 1);
if (ret)
- goto failed_mem;
+ goto failed;
kfree(cmd);
return 0;
-failed_mem:
- amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
failed:
kfree(cmd);
return ret;
@@ -203,104 +208,78 @@ static void psp_prep_asd_cmd_buf(struct psp_gfx_cmd_resp *cmd,
cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
}
-static int psp_asd_load(struct psp_context *psp)
+static int psp_asd_init(struct psp_context *psp)
{
int ret;
- struct amdgpu_bo *asd_bo, *asd_shared_bo;
- uint64_t asd_mc_addr, asd_shared_mc_addr;
- void *asd_buf, *asd_shared_buf;
- struct psp_gfx_cmd_resp *cmd;
-
- cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
/*
* Allocate 16k memory aligned to 4k from Frame Buffer (local
* physical) for shared ASD <-> Driver
*/
- ret = amdgpu_bo_create_kernel(psp->adev, PSP_ASD_SHARED_MEM_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &asd_shared_bo, &asd_shared_mc_addr, &asd_buf);
- if (ret)
- goto failed;
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_ASD_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->asd_shared_bo,
+ &psp->asd_shared_mc_addr,
+ &psp->asd_shared_buf);
- /*
- * Allocate 256k memory aligned to 4k from Frame Buffer (local
- * physical) for ASD firmware
- */
- ret = amdgpu_bo_create_kernel(psp->adev, PSP_ASD_BIN_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &asd_bo, &asd_mc_addr, &asd_buf);
- if (ret)
- goto failed_mem;
+ return ret;
+}
+
+static int psp_asd_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
- memcpy(asd_buf, psp->asd_start_addr, psp->asd_ucode_size);
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);
- psp_prep_asd_cmd_buf(cmd, asd_mc_addr, asd_shared_mc_addr,
+ psp_prep_asd_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->asd_shared_mc_addr,
psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr, 2);
- if (ret)
- goto failed_mem1;
- amdgpu_bo_free_kernel(&asd_bo, &asd_mc_addr, &asd_buf);
- amdgpu_bo_free_kernel(&asd_shared_bo, &asd_shared_mc_addr, &asd_shared_buf);
kfree(cmd);
- return 0;
-
-failed_mem1:
- amdgpu_bo_free_kernel(&asd_bo, &asd_mc_addr, &asd_buf);
-failed_mem:
- amdgpu_bo_free_kernel(&asd_shared_bo, &asd_shared_mc_addr, &asd_shared_buf);
-failed:
- kfree(cmd);
return ret;
}
-static int psp_load_fw(struct amdgpu_device *adev)
+static int psp_hw_start(struct psp_context *psp)
{
int ret;
- struct psp_gfx_cmd_resp *cmd;
- int i;
- struct amdgpu_firmware_info *ucode;
- struct psp_context *psp = &adev->psp;
-
- cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
ret = psp_bootloader_load_sysdrv(psp);
if (ret)
- goto failed;
+ return ret;
ret = psp_bootloader_load_sos(psp);
if (ret)
- goto failed;
-
- ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
- if (ret)
- goto failed;
+ return ret;
- ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &psp->fence_buf_bo,
- &psp->fence_buf_mc_addr,
- &psp->fence_buf);
+ ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
if (ret)
- goto failed;
-
- memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
+ return ret;
- ret = psp_tmr_init(psp);
+ ret = psp_tmr_load(psp);
if (ret)
- goto failed_mem;
+ return ret;
ret = psp_asd_load(psp);
if (ret)
- goto failed_mem;
+ return ret;
+
+ return 0;
+}
+
+static int psp_np_fw_load(struct psp_context *psp)
+{
+ int i, ret;
+ struct amdgpu_firmware_info *ucode;
+ struct amdgpu_device *adev = psp->adev;
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
@@ -310,15 +289,21 @@ static int psp_load_fw(struct amdgpu_device *adev)
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
psp_smu_reload_quirk(psp))
continue;
+ if (amdgpu_sriov_vf(adev) &&
+ (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G))
+ /* skip ucode loading in SRIOV VF */
+ continue;
- ret = psp_prep_cmd_buf(ucode, cmd);
+ ret = psp_prep_cmd_buf(ucode, psp->cmd);
if (ret)
- goto failed_mem;
+ return ret;
- ret = psp_cmd_submit_buf(psp, ucode, cmd,
+ ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
psp->fence_buf_mc_addr, i + 3);
if (ret)
- goto failed_mem;
+ return ret;
#if 0
/* check if firmware loaded successfully */
@@ -327,8 +312,59 @@ static int psp_load_fw(struct amdgpu_device *adev)
#endif
}
- amdgpu_bo_free_kernel(&psp->fence_buf_bo,
- &psp->fence_buf_mc_addr, &psp->fence_buf);
+ return 0;
+}
+
+static int psp_load_fw(struct amdgpu_device *adev)
+{
+ int ret;
+ struct psp_context *psp = &adev->psp;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp->cmd = cmd;
+
+ ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &psp->fw_pri_bo,
+ &psp->fw_pri_mc_addr,
+ &psp->fw_pri_buf);
+ if (ret)
+ goto failed;
+
+ ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->fence_buf_bo,
+ &psp->fence_buf_mc_addr,
+ &psp->fence_buf);
+ if (ret)
+ goto failed_mem1;
+
+ memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
+
+ ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
+ if (ret)
+ goto failed_mem1;
+
+ ret = psp_tmr_init(psp);
+ if (ret)
+ goto failed_mem;
+
+ ret = psp_asd_init(psp);
+ if (ret)
+ goto failed_mem;
+
+ ret = psp_hw_start(psp);
+ if (ret)
+ goto failed_mem;
+
+ ret = psp_np_fw_load(psp);
+ if (ret)
+ goto failed_mem;
+
kfree(cmd);
return 0;
@@ -336,6 +372,9 @@ static int psp_load_fw(struct amdgpu_device *adev)
failed_mem:
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
&psp->fence_buf_mc_addr, &psp->fence_buf);
+failed_mem1:
+ amdgpu_bo_free_kernel(&psp->fw_pri_bo,
+ &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
failed:
kfree(cmd);
return ret;
@@ -379,12 +418,24 @@ static int psp_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
- amdgpu_ucode_fini_bo(adev);
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ return 0;
+
+ amdgpu_ucode_fini_bo(adev);
+
+ psp_ring_destroy(psp, PSP_RING_TYPE__KM);
if (psp->tmr_buf)
amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
+ if (psp->fw_pri_buf)
+ amdgpu_bo_free_kernel(&psp->fw_pri_bo,
+ &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
+
+ if (psp->fence_buf_bo)
+ amdgpu_bo_free_kernel(&psp->fence_buf_bo,
+ &psp->fence_buf_mc_addr, &psp->fence_buf);
+
return 0;
}
@@ -397,18 +448,30 @@ static int psp_resume(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct psp_context *psp = &adev->psp;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
return 0;
+ DRM_INFO("PSP is resuming...\n");
+
mutex_lock(&adev->firmware.mutex);
- ret = psp_load_fw(adev);
+ ret = psp_hw_start(psp);
if (ret)
- DRM_ERROR("PSP resume failed\n");
+ goto failed;
+
+ ret = psp_np_fw_load(psp);
+ if (ret)
+ goto failed;
mutex_unlock(&adev->firmware.mutex);
+ return 0;
+
+failed:
+ DRM_ERROR("PSP resume failed\n");
+ mutex_unlock(&adev->firmware.mutex);
return ret;
}
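
Taken together, the PSP rework separates one-time setup from work that must be redone on every boot cycle: psp_tmr_init()/psp_asd_init() only allocate backing store, a single 1 MB fw_pri bounce buffer stages every image before submission, and psp_hw_start() replays the bootloader, ring creation and TMR/ASD load commands so that psp_resume() can skip all allocations. A condensed sketch of the resulting flow (buffer allocations and most error paths elided):

	/* Condensed sketch of the new PSP bring-up split. */
	static int psp_load_fw_sketch(struct amdgpu_device *adev)
	{
		struct psp_context *psp = &adev->psp;
		int ret;

		/* one-time: ring bookkeeping and buffer allocation */
		ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
		ret = ret ? ret : psp_tmr_init(psp);	/* 3 MB TMR in VRAM      */
		ret = ret ? ret : psp_asd_init(psp);	/* 16 KB ASD shared buf  */
		if (ret)
			return ret;

		/* per boot cycle: replayed verbatim by psp_resume() */
		ret = psp_hw_start(psp);	/* sysdrv, sos, ring, TMR/ASD load */
		return ret ? ret : psp_np_fw_load(psp);	/* remaining firmwares */
	}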
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index e9f35e0..0301e4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -30,8 +30,8 @@
#define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000
-#define PSP_ASD_BIN_SIZE 0x40000
#define PSP_ASD_SHARED_MEM_SIZE 0x4000
+#define PSP_1_MEG 0x100000
enum psp_ring_type
{
@@ -57,6 +57,7 @@ struct psp_context
{
struct amdgpu_device *adev;
struct psp_ring km_ring;
+ struct psp_gfx_cmd_resp *cmd;
int (*init_microcode)(struct psp_context *psp);
int (*bootloader_load_sysdrv)(struct psp_context *psp);
@@ -64,6 +65,9 @@ struct psp_context
int (*prep_cmd_buf)(struct amdgpu_firmware_info *ucode,
struct psp_gfx_cmd_resp *cmd);
int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
+ int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type);
+ int (*ring_destroy)(struct psp_context *psp,
+ enum psp_ring_type ring_type);
int (*cmd_submit)(struct psp_context *psp, struct amdgpu_firmware_info *ucode,
uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, int index);
bool (*compare_sram_data)(struct psp_context *psp,
@@ -71,6 +75,11 @@ struct psp_context
enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp);
+ /* firmware private buffer (fw_pri) */
+ struct amdgpu_bo *fw_pri_bo;
+ uint64_t fw_pri_mc_addr;
+ void *fw_pri_buf;
+
/* sos firmware */
const struct firmware *sos_fw;
uint32_t sos_fw_version;
@@ -85,12 +94,15 @@ struct psp_context
uint64_t tmr_mc_addr;
void *tmr_buf;
- /* asd firmware */
+ /* asd firmware and buffer */
const struct firmware *asd_fw;
uint32_t asd_fw_version;
uint32_t asd_feature_version;
uint32_t asd_ucode_size;
uint8_t *asd_start_addr;
+ struct amdgpu_bo *asd_shared_bo;
+ uint64_t asd_shared_mc_addr;
+ void *asd_shared_buf;
/* fence buffer */
struct amdgpu_bo *fence_buf_bo;
@@ -105,6 +117,8 @@ struct amdgpu_psp_funcs {
#define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type))
#define psp_ring_init(psp, type) (psp)->ring_init((psp), (type))
+#define psp_ring_create(psp, type) (psp)->ring_create((psp), (type))
+#define psp_ring_destroy(psp, type) ((psp)->ring_destroy((psp), (type)))
#define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \
(psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index))
#define psp_compare_sram_data(psp, ucode, type) \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 63e5639..944443c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -99,6 +99,7 @@ struct amdgpu_ring_funcs {
uint32_t align_mask;
u32 nop;
bool support_64bit_ptrs;
+ unsigned vmhub;
/* ring read/write ptr handling */
u64 (*get_rptr)(struct amdgpu_ring *ring);
@@ -178,6 +179,7 @@ struct amdgpu_ring {
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
volatile u32 *cond_exe_cpu_addr;
+ unsigned vm_inv_eng;
#if defined(CONFIG_DEBUG_FS)
struct dentry *ent;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index de9f919..5ca75a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -130,7 +130,7 @@ int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
return -EINVAL;
}
- r = amdgpu_bo_reserve(sa_manager->bo, false);
+ r = amdgpu_bo_reserve(sa_manager->bo, true);
if (!r) {
amdgpu_bo_kunmap(sa_manager->bo);
amdgpu_bo_unpin(sa_manager->bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index ee9d0f3..8601904 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -190,26 +190,29 @@ TRACE_EVENT(amdgpu_sched_run_job,
TRACE_EVENT(amdgpu_vm_grab_id,
- TP_PROTO(struct amdgpu_vm *vm, int ring, struct amdgpu_job *job),
+ TP_PROTO(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_job *job),
TP_ARGS(vm, ring, job),
TP_STRUCT__entry(
__field(struct amdgpu_vm *, vm)
__field(u32, ring)
- __field(u32, vmid)
+ __field(u32, vm_id)
+ __field(u32, vm_hub)
__field(u64, pd_addr)
__field(u32, needs_flush)
),
TP_fast_assign(
__entry->vm = vm;
- __entry->ring = ring;
- __entry->vmid = job->vm_id;
+ __entry->ring = ring->idx;
+ __entry->vm_id = job->vm_id;
+ __entry->vm_hub = ring->funcs->vmhub;
__entry->pd_addr = job->vm_pd_addr;
__entry->needs_flush = job->vm_needs_flush;
),
- TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx needs_flush=%u",
- __entry->vm, __entry->ring, __entry->vmid,
- __entry->pd_addr, __entry->needs_flush)
+ TP_printk("vm=%p, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
+ __entry->vm, __entry->ring, __entry->vm_id,
+ __entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
);
TRACE_EVENT(amdgpu_vm_bo_map,
@@ -331,21 +334,25 @@ TRACE_EVENT(amdgpu_vm_copy_ptes,
);
TRACE_EVENT(amdgpu_vm_flush,
- TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
- TP_ARGS(pd_addr, ring, id),
+ TP_PROTO(struct amdgpu_ring *ring, unsigned vm_id,
+ uint64_t pd_addr),
+ TP_ARGS(ring, vm_id, pd_addr),
TP_STRUCT__entry(
- __field(u64, pd_addr)
__field(u32, ring)
- __field(u32, id)
+ __field(u32, vm_id)
+ __field(u32, vm_hub)
+ __field(u64, pd_addr)
),
TP_fast_assign(
+ __entry->ring = ring->idx;
+ __entry->vm_id = vm_id;
+ __entry->vm_hub = ring->funcs->vmhub;
__entry->pd_addr = pd_addr;
- __entry->ring = ring;
- __entry->id = id;
),
- TP_printk("ring=%u, id=%u, pd_addr=%010Lx",
- __entry->ring, __entry->id, __entry->pd_addr)
+ TP_printk("ring=%u, id=%u, hub=%u, pd_addr=%010Lx",
+ __entry->ring, __entry->vm_id,
+ __entry->vm_hub, __entry->pd_addr)
);
TRACE_EVENT(amdgpu_bo_list_set,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 35d53a0..5db0230 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -203,7 +203,9 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
abo = container_of(bo, struct amdgpu_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- if (adev->mman.buffer_funcs_ring->ready == false) {
+ if (adev->mman.buffer_funcs &&
+ adev->mman.buffer_funcs_ring &&
+ adev->mman.buffer_funcs_ring->ready == false) {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
} else {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
@@ -763,7 +765,7 @@ int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
{
struct amdgpu_ttm_tt *gtt, *tmp;
struct ttm_mem_reg bo_mem;
- uint32_t flags;
+ uint64_t flags;
int r;
bo_mem.mem_type = TTM_PL_TT;
@@ -1038,11 +1040,17 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
- if (bo->mem.mem_type == TTM_PL_VRAM &&
- bo->mem.start == AMDGPU_BO_INVALID_OFFSET) {
- unsigned long num_pages = bo->mem.num_pages;
- struct drm_mm_node *node = bo->mem.mm_node;
+ unsigned long num_pages = bo->mem.num_pages;
+ struct drm_mm_node *node = bo->mem.mm_node;
+
+ if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
+ return ttm_bo_eviction_valuable(bo, place);
+
+ switch (bo->mem.mem_type) {
+ case TTM_PL_TT:
+ return true;
+ case TTM_PL_VRAM:
/* Check each drm MM node individually */
while (num_pages) {
if (place->fpfn < (node->start + node->size) &&
@@ -1052,8 +1060,10 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
num_pages -= node->size;
++node;
}
+ break;
- return false;
+ default:
+ break;
}
return ttm_bo_eviction_valuable(bo, place);
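
The rewritten predicate handles BOs that have no fixed GPU offset yet: such a GTT BO is always worth evicting, while a VRAM BO may be backed by several scattered drm_mm nodes, so every node has to be checked against the requested placement window before falling back to the generic test. A hedged sketch of the per-node test implied by the loop above (helper name is illustrative only):

	/* Illustrative: does one mm node intersect the requested window? */
	static bool node_in_place(const struct drm_mm_node *node,
				  const struct ttm_place *place)
	{
		return place->fpfn < node->start + node->size &&
		       (place->lpfn == 0 || place->lpfn > node->start);
	}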
@@ -1188,7 +1198,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
return;
amdgpu_ttm_debugfs_fini(adev);
if (adev->stollen_vga_memory) {
- r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
+ r = amdgpu_bo_reserve(adev->stollen_vga_memory, true);
if (r == 0) {
amdgpu_bo_unpin(adev->stollen_vga_memory);
amdgpu_bo_unreserve(adev->stollen_vga_memory);
@@ -1401,6 +1411,8 @@ error_free:
#if defined(CONFIG_DEBUG_FS)
+extern void amdgpu_gtt_mgr_print(struct seq_file *m,
+                                 struct ttm_mem_type_manager *man);
static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
@@ -1414,11 +1426,17 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
spin_lock(&glob->lru_lock);
drm_mm_print(mm, &p);
spin_unlock(&glob->lru_lock);
- if (ttm_pl == TTM_PL_VRAM)
+ switch (ttm_pl) {
+ case TTM_PL_VRAM:
seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
adev->mman.bdev.man[ttm_pl].size,
(u64)atomic64_read(&adev->vram_usage) >> 20,
(u64)atomic64_read(&adev->vram_vis_usage) >> 20);
+ break;
+ case TTM_PL_TT:
+ amdgpu_gtt_mgr_print(m, &adev->mman.bdev.man[TTM_PL_TT]);
+ break;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index a1891c9..dfd1c98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -382,10 +382,14 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
* if SMU loaded firmware, it needn't add SMC, UVD, and VCE
* ucode info here
*/
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
- adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 4;
- else
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ if (amdgpu_sriov_vf(adev))
+ adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 3;
+ else
+ adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 4;
+ } else {
adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM;
+ }
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index c853400..735c38d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -955,11 +955,11 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
uint32_t rptr = amdgpu_ring_get_rptr(ring);
unsigned i;
- int r;
+ int r, timeout = adev->usec_timeout;
- /* TODO: remove it if VCE can work for sriov */
+ /* workaround: the VCE ring test is slow under SR-IOV */
if (amdgpu_sriov_vf(adev))
- return 0;
+ timeout *= 10;
r = amdgpu_ring_alloc(ring, 16);
if (r) {
@@ -970,13 +970,13 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, VCE_CMD_END);
amdgpu_ring_commit(ring);
- for (i = 0; i < adev->usec_timeout; i++) {
+ for (i = 0; i < timeout; i++) {
if (amdgpu_ring_get_rptr(ring) != rptr)
break;
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
+ if (i < timeout) {
DRM_INFO("ring test on %d succeeded in %d usecs\n",
ring->idx, i);
} else {
@@ -999,10 +999,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *fence = NULL;
long r;
- /* TODO: remove it if VCE can work for sriov */
- if (amdgpu_sriov_vf(ring->adev))
- return 0;
-
/* skip vce ring1/2 ib test for now, since it's not reliable */
if (ring != &ring->adev->vce.ring[0])
return 0;
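
Rather than skipping the VCE ring test entirely under SR-IOV, the test keeps running with a ten-times larger polling budget, presumably because world switches make rptr updates visibly slower in a virtual function; the IB test loses its SR-IOV bail-out for the same reason. The pattern, in short:

	/* Sketch: widen the poll budget instead of skipping the test. */
	int timeout = adev->usec_timeout;

	if (amdgpu_sriov_vf(adev))
		timeout *= 10;	/* rptr updates are slow under SR-IOV */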
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index ba8b8ae..6bf5cea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -225,3 +225,49 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
return 0;
}
+
+/**
+ * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
+ * @adev: amdgpu device.
+ *
+ * The MM table is used by UVD and VCE for their initialization.
+ *
+ * Return: Zero on success, negative error code on failure.
+ */
+int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
+ return 0;
+
+ r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->virt.mm_table.bo,
+ &adev->virt.mm_table.gpu_addr,
+ (void *)&adev->virt.mm_table.cpu_addr);
+ if (r) {
+ DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
+ return r;
+ }
+
+ memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
+ DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
+ adev->virt.mm_table.gpu_addr,
+ adev->virt.mm_table.cpu_addr);
+ return 0;
+}
+
+/**
+ * amdgpu_virt_free_mm_table() - free mm table memory
+ * @adev: amdgpu device.
+ *
+ * Free the MM table memory.
+ */
+void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
+{
+ if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
+ return;
+
+ amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
+ &adev->virt.mm_table.gpu_addr,
+ (void *)&adev->virt.mm_table.cpu_addr);
+ adev->virt.mm_table.gpu_addr = 0;
+}
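
The two helpers are intended to bracket multimedia IP setup under SR-IOV: a single page is allocated in VRAM before UVD/VCE initialization fills it in and hands it to the MM scheduler, and it is freed again on teardown; on bare metal both calls are no-ops. A minimal usage sketch (function names are illustrative):

	/* Sketch: expected pairing of the mm-table helpers. */
	static int mm_ip_hw_init_sketch(struct amdgpu_device *adev)
	{
		int r = amdgpu_virt_alloc_mm_table(adev);

		if (r)
			return r;
		/* ... fill adev->virt.mm_table.cpu_addr, kick MMSCH ... */
		return 0;
	}

	static void mm_ip_hw_fini_sketch(struct amdgpu_device *adev)
	{
		amdgpu_virt_free_mm_table(adev);
	}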
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 1ee0a19..a8ed162 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -98,5 +98,7 @@ int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary);
+int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
+void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 7ed5302..8ecf82c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -406,6 +406,8 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_job *job)
{
struct amdgpu_device *adev = ring->adev;
+ unsigned vmhub = ring->funcs->vmhub;
+ struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
uint64_t fence_context = adev->fence_context + ring->idx;
struct dma_fence *updates = sync->last_vm_update;
struct amdgpu_vm_id *id, *idle;
@@ -413,16 +415,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
unsigned i;
int r = 0;
- fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
- GFP_KERNEL);
+ fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
if (!fences)
return -ENOMEM;
- mutex_lock(&adev->vm_manager.lock);
+ mutex_lock(&id_mgr->lock);
/* Check if we have an idle VMID */
i = 0;
- list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
+ list_for_each_entry(idle, &id_mgr->ids_lru, list) {
fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
if (!fences[i])
break;
@@ -430,7 +431,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
}
/* If we can't find a idle VMID to use, wait till one becomes available */
- if (&idle->list == &adev->vm_manager.ids_lru) {
+ if (&idle->list == &id_mgr->ids_lru) {
u64 fence_context = adev->vm_manager.fence_context + ring->idx;
unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
struct dma_fence_array *array;
@@ -455,25 +456,19 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r)
goto error;
- mutex_unlock(&adev->vm_manager.lock);
+ mutex_unlock(&id_mgr->lock);
return 0;
}
kfree(fences);
- job->vm_needs_flush = true;
+ job->vm_needs_flush = false;
/* Check if we can use a VMID already assigned to this VM */
- i = ring->idx;
- do {
+ list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
struct dma_fence *flushed;
-
- id = vm->ids[i++];
- if (i == AMDGPU_MAX_RINGS)
- i = 0;
+ bool needs_flush = false;
/* Check all the prerequisites to using this VMID */
- if (!id)
- continue;
if (amdgpu_vm_had_gpu_reset(adev, id))
continue;
@@ -483,16 +478,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (job->vm_pd_addr != id->pd_gpu_addr)
continue;
- if (!id->last_flush)
- continue;
-
- if (id->last_flush->context != fence_context &&
- !dma_fence_is_signaled(id->last_flush))
- continue;
+ if (!id->last_flush ||
+ (id->last_flush->context != fence_context &&
+ !dma_fence_is_signaled(id->last_flush)))
+ needs_flush = true;
flushed = id->flushed_updates;
- if (updates &&
- (!flushed || dma_fence_is_later(updates, flushed)))
+ if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
+ needs_flush = true;
+
+ /* Concurrent flushes are only possible starting with Vega10 */
+ if (adev->asic_type < CHIP_VEGA10 && needs_flush)
continue;
/* Good we can use this VMID. Remember this submission as
@@ -502,17 +498,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r)
goto error;
- list_move_tail(&id->list, &adev->vm_manager.ids_lru);
- vm->ids[ring->idx] = id;
-
- job->vm_id = id - adev->vm_manager.ids;
- job->vm_needs_flush = false;
- trace_amdgpu_vm_grab_id(vm, ring->idx, job);
+ if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
+ dma_fence_put(id->flushed_updates);
+ id->flushed_updates = dma_fence_get(updates);
+ }
- mutex_unlock(&adev->vm_manager.lock);
- return 0;
+ if (needs_flush)
+ goto needs_flush;
+ else
+ goto no_flush_needed;
- } while (i != ring->idx);
+ }
/* Still no ID to use? Then use the idle one found earlier */
id = idle;
@@ -522,23 +518,25 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r)
goto error;
- dma_fence_put(id->last_flush);
- id->last_flush = NULL;
-
+ id->pd_gpu_addr = job->vm_pd_addr;
dma_fence_put(id->flushed_updates);
id->flushed_updates = dma_fence_get(updates);
-
- id->pd_gpu_addr = job->vm_pd_addr;
id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
- list_move_tail(&id->list, &adev->vm_manager.ids_lru);
atomic64_set(&id->owner, vm->client_id);
- vm->ids[ring->idx] = id;
- job->vm_id = id - adev->vm_manager.ids;
- trace_amdgpu_vm_grab_id(vm, ring->idx, job);
+needs_flush:
+ job->vm_needs_flush = true;
+ dma_fence_put(id->last_flush);
+ id->last_flush = NULL;
+
+no_flush_needed:
+ list_move_tail(&id->list, &id_mgr->ids_lru);
+
+ job->vm_id = id - id_mgr->ids;
+ trace_amdgpu_vm_grab_id(vm, ring, job);
error:
- mutex_unlock(&adev->vm_manager.lock);
+ mutex_unlock(&id_mgr->lock);
return r;
}
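
The reuse path now scans the per-hub LRU from most to least recently used instead of consulting a per-ring slot. Whether a flush is still required is tracked separately in needs_flush, and pre-Vega10 parts simply reject candidates that would need one, since they cannot flush concurrently. Roughly, per candidate (owner check elided, as in the hunk above):

	/* Sketch: outcome of testing one LRU candidate. */
	bool needs_flush = !id->last_flush ||
			   (id->last_flush->context != fence_context &&
			    !dma_fence_is_signaled(id->last_flush));

	if (amdgpu_vm_had_gpu_reset(adev, id) ||
	    job->vm_pd_addr != id->pd_gpu_addr)
		continue;			/* unusable for this job */

	if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
		needs_flush = true;		/* page tables moved on */

	if (adev->asic_type < CHIP_VEGA10 && needs_flush)
		continue;			/* no concurrent flush pre-Vega10 */
	/* else: reuse the ID; flush only when needs_flush is set */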
@@ -590,7 +588,9 @@ static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
+ unsigned vmhub = ring->funcs->vmhub;
+ struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id];
bool gds_switch_needed = ring->funcs->emit_gds_switch && (
id->gds_base != job->gds_base ||
id->gds_size != job->gds_size ||
@@ -614,27 +614,27 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
if (ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring);
- if (ring->funcs->emit_pipeline_sync)
+ if (ring->funcs->emit_pipeline_sync && !job->need_pipeline_sync)
amdgpu_ring_emit_pipeline_sync(ring);
if (ring->funcs->emit_vm_flush && vm_flush_needed) {
u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
struct dma_fence *fence;
- trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
+ trace_amdgpu_vm_flush(ring, job->vm_id, pd_addr);
amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
r = amdgpu_fence_emit(ring, &fence);
if (r)
return r;
- mutex_lock(&adev->vm_manager.lock);
+ mutex_lock(&id_mgr->lock);
dma_fence_put(id->last_flush);
id->last_flush = fence;
- mutex_unlock(&adev->vm_manager.lock);
+ mutex_unlock(&id_mgr->lock);
}
- if (gds_switch_needed) {
+ if (ring->funcs->emit_gds_switch && gds_switch_needed) {
id->gds_base = job->gds_base;
id->gds_size = job->gds_size;
id->gws_base = job->gws_base;
@@ -666,10 +666,13 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
*
* Reset saved GDW, GWS and OA to force switch on next flush.
*/
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
+ unsigned vmid)
{
- struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
+ struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
+ atomic64_set(&id->owner, 0);
id->gds_base = 0;
id->gds_size = 0;
id->gws_base = 0;
@@ -679,6 +682,26 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
}
/**
+ * amdgpu_vm_reset_all_ids - reset all VMID states
+ *
+ * @adev: amdgpu device structure
+ *
+ * Reset the saved state of every VMID to force a flush on next use.
+ */
+void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
+{
+ unsigned i, j;
+
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+ struct amdgpu_vm_id_manager *id_mgr =
+ &adev->vm_manager.id_mgr[i];
+
+ for (j = 1; j < id_mgr->num_ids; ++j)
+ amdgpu_vm_reset_id(adev, i, j);
+ }
+}
+
+/**
* amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
*
* @vm: requested vm
@@ -1336,6 +1359,12 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
flags &= ~AMDGPU_PTE_MTYPE_MASK;
flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
+ if ((mapping->flags & AMDGPU_PTE_PRT) &&
+ (adev->asic_type >= CHIP_VEGA10)) {
+ flags |= AMDGPU_PTE_PRT;
+ flags &= ~AMDGPU_PTE_VALID;
+ }
+
trace_amdgpu_vm_bo_update(mapping);
pfn = mapping->offset >> PAGE_SHIFT;
@@ -1629,8 +1658,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
- r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
- 0, 0, &f);
+ r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm,
+ mapping->start, mapping->last,
+ 0, 0, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
if (r) {
dma_fence_put(f);
@@ -2117,10 +2147,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
unsigned ring_instance;
struct amdgpu_ring *ring;
struct amd_sched_rq *rq;
- int i, r;
+ int r;
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
- vm->ids[i] = NULL;
vm->va = RB_ROOT;
vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
spin_lock_init(&vm->status_lock);
@@ -2241,16 +2269,21 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
*/
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
- unsigned i;
+ unsigned i, j;
+
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+ struct amdgpu_vm_id_manager *id_mgr =
+ &adev->vm_manager.id_mgr[i];
- INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
+ mutex_init(&id_mgr->lock);
+ INIT_LIST_HEAD(&id_mgr->ids_lru);
- /* skip over VMID 0, since it is the system VM */
- for (i = 1; i < adev->vm_manager.num_ids; ++i) {
- amdgpu_vm_reset_id(adev, i);
- amdgpu_sync_create(&adev->vm_manager.ids[i].active);
- list_add_tail(&adev->vm_manager.ids[i].list,
- &adev->vm_manager.ids_lru);
+ /* skip over VMID 0, since it is the system VM */
+ for (j = 1; j < id_mgr->num_ids; ++j) {
+ amdgpu_vm_reset_id(adev, i, j);
+ amdgpu_sync_create(&id_mgr->ids[j].active);
+ list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
+ }
}
adev->vm_manager.fence_context =
@@ -2273,13 +2306,19 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
*/
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
- unsigned i;
+ unsigned i, j;
- for (i = 0; i < AMDGPU_NUM_VM; ++i) {
- struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+ struct amdgpu_vm_id_manager *id_mgr =
+ &adev->vm_manager.id_mgr[i];
- amdgpu_sync_free(&adev->vm_manager.ids[i].active);
- dma_fence_put(id->flushed_updates);
- dma_fence_put(id->last_flush);
+ mutex_destroy(&id_mgr->lock);
+ for (j = 0; j < AMDGPU_NUM_VM; ++j) {
+ struct amdgpu_vm_id *id = &id_mgr->ids[j];
+
+ amdgpu_sync_free(&id->active);
+ dma_fence_put(id->flushed_updates);
+ dma_fence_put(id->last_flush);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index d9e5729..e1d951e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -65,7 +65,8 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_PTE_FRAG(x) ((x & 0x1fULL) << 7)
-#define AMDGPU_PTE_PRT (1ULL << 63)
+/* TILED for VEGA10, reserved for older ASICs */
+#define AMDGPU_PTE_PRT (1ULL << 51)
/* VEGA10 only */
#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
@@ -114,9 +115,6 @@ struct amdgpu_vm {
struct dma_fence *last_dir_update;
uint64_t last_eviction_counter;
- /* for id and flush management per ring */
- struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
-
/* protecting freed */
spinlock_t freed_lock;
@@ -149,12 +147,16 @@ struct amdgpu_vm_id {
uint32_t oa_size;
};
+struct amdgpu_vm_id_manager {
+ struct mutex lock;
+ unsigned num_ids;
+ struct list_head ids_lru;
+ struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
+};
+
struct amdgpu_vm_manager {
/* Handling of VMIDs */
- struct mutex lock;
- unsigned num_ids;
- struct list_head ids_lru;
- struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
+ struct amdgpu_vm_id_manager id_mgr[AMDGPU_MAX_VMHUBS];
/* Handling of VM fences */
u64 fence_context;
@@ -200,7 +202,9 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct dma_fence *fence,
struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
+ unsigned vmid);
+void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
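
With the VMID state split per hub, every consumer first resolves its manager through the ring's hub index; the lookup pattern used by grab_id, flush and reset is simply:

	struct amdgpu_vm_id_manager *id_mgr =
		&adev->vm_manager.id_mgr[ring->funcs->vmhub];
	struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id];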
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index a4831fe..a2c59a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -220,9 +220,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
}
const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
- amdgpu_vram_mgr_init,
- amdgpu_vram_mgr_fini,
- amdgpu_vram_mgr_new,
- amdgpu_vram_mgr_del,
- amdgpu_vram_mgr_debug
+ .init = amdgpu_vram_mgr_init,
+ .takedown = amdgpu_vram_mgr_fini,
+ .get_node = amdgpu_vram_mgr_new,
+ .put_node = amdgpu_vram_mgr_del,
+ .debug = amdgpu_vram_mgr_debug
};
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 8c9bc75..8a0818b 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
- ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+ ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
@@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
{
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
- ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+ ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 11ccda8..ec93714 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -906,6 +906,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
+ /* disable mclk switching if the refresh is >120Hz, even if the
+ * blanking period would allow it
+ */
+ if (amdgpu_dpm_get_vrefresh(adev) > 120)
+ return true;
+
if (vblank_time < switch_limit)
return true;
else
@@ -1267,30 +1273,33 @@ static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
{
- if (mode) {
- /* stop auto-manage */
+ switch (mode) {
+ case AMD_FAN_CTRL_NONE:
if (adev->pm.dpm.fan.ucode_fan_control)
ci_fan_ctrl_stop_smc_fan_control(adev);
- ci_fan_ctrl_set_static_mode(adev, mode);
- } else {
- /* restart auto-manage */
+ ci_dpm_set_fan_speed_percent(adev, 100);
+ break;
+ case AMD_FAN_CTRL_MANUAL:
+ if (adev->pm.dpm.fan.ucode_fan_control)
+ ci_fan_ctrl_stop_smc_fan_control(adev);
+ break;
+ case AMD_FAN_CTRL_AUTO:
if (adev->pm.dpm.fan.ucode_fan_control)
ci_thermal_start_smc_fan_control(adev);
- else
- ci_fan_ctrl_set_default_mode(adev);
+ break;
+ default:
+ break;
}
}
static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
{
struct ci_power_info *pi = ci_get_pi(adev);
- u32 tmp;
if (pi->fan_is_controlled_by_smc)
- return 0;
-
- tmp = RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
- return (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
+ return AMD_FAN_CTRL_AUTO;
+ else
+ return AMD_FAN_CTRL_MANUAL;
}
#if 0
@@ -3036,6 +3045,7 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev,
memory_clock,
&memory_level->MinVddcPhases);
+ memory_level->EnabledForActivity = 1;
memory_level->EnabledForThrottle = 1;
memory_level->UpH = 0;
memory_level->DownH = 100;
@@ -3468,8 +3478,6 @@ static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
return ret;
}
- pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
-
if ((dpm_table->mclk_table.count >= 2) &&
((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
pi->smc_state_table.MemoryLevel[1].MinVddc =
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index ba98d35..5dffa27 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1207,8 +1207,11 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@@ -2230,7 +2233,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(abo, false);
+ r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
amdgpu_bo_unpin(abo);
@@ -2589,7 +2592,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
unpin:
if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- ret = amdgpu_bo_reserve(aobj, false);
+ ret = amdgpu_bo_reserve(aobj, true);
if (likely(ret == 0)) {
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
@@ -2720,7 +2723,7 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(abo, false);
+ r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
else {
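
The watermark math switches to div_u64 because the old expression could overflow: on 32-bit kernels 1000000UL is only a 32-bit multiply, and 1000000 * crtc_htotal exceeds U32_MAX once htotal passes 4294, which large 4K/5K timings can reach; div_u64 also avoids a native 64-bit division on those kernels. A minimal sketch of the overflow-safe form (assuming htotal and clock fit in 32 bits):

	#include <linux/math64.h>

	static u32 line_time_us(u32 crtc_htotal, u32 clock_khz)
	{
		/* widen before multiplying, then divide via div_u64 */
		u32 t = (u32)div_u64((u64)crtc_htotal * 1000000, clock_khz);

		return min_t(u32, t, 65535);
	}

The same fix is applied verbatim to the DCE 11, 6 and 8 variants below.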
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index e59bc42..47bbc87 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1176,8 +1176,11 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@@ -2214,7 +2217,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(abo, false);
+ r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
amdgpu_bo_unpin(abo);
@@ -2609,7 +2612,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
unpin:
if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- ret = amdgpu_bo_reserve(aobj, false);
+ ret = amdgpu_bo_reserve(aobj, true);
if (likely(ret == 0)) {
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
@@ -2740,7 +2743,7 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(abo, false);
+ r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
else {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 307269b..d8c9a95 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -979,12 +979,15 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
u32 priority_a_mark = 0, priority_b_mark = 0;
u32 priority_a_cnt = PRIORITY_OFF;
u32 priority_b_cnt = PRIORITY_OFF;
- u32 tmp, arb_control3;
+ u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
fixed20_12 a, b, c;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
@@ -1091,6 +1094,8 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
c.full = dfixed_div(c, a);
priority_b_mark = dfixed_trunc(c);
priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+
+ lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
}
/* select wm A */
@@ -1120,6 +1125,9 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
amdgpu_crtc->wm_high = latency_watermark_a;
+
+ /* Save number of lines the linebuffer leads before the scanout */
+ amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
/* watermark setup */
@@ -1640,7 +1648,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(abo, false);
+ r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
amdgpu_bo_unpin(abo);
@@ -1957,7 +1965,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
unpin:
if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- ret = amdgpu_bo_reserve(aobj, false);
+ ret = amdgpu_bo_reserve(aobj, true);
if (likely(ret == 0)) {
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
@@ -2083,7 +2091,7 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(abo, false);
+ r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
else {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 6df7a28..db30c6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1091,8 +1091,11 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@@ -2089,7 +2092,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->primary->fb) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(abo, false);
+ r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
amdgpu_bo_unpin(abo);
@@ -2440,7 +2443,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
unpin:
if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- ret = amdgpu_bo_reserve(aobj, false);
+ ret = amdgpu_bo_reserve(aobj, true);
if (likely(ret == 0)) {
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
@@ -2571,7 +2574,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(abo, false);
+ r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
else {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 81a24b6..f1b479b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -248,7 +248,7 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(abo, false);
+ r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
else {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 4c4874f..a125f9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1579,7 +1579,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev)
static void gfx_v6_0_config_init(struct amdgpu_device *adev)
{
- adev->gfx.config.double_offchip_lds_buf = 1;
+ adev->gfx.config.double_offchip_lds_buf = 0;
}
static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
@@ -2437,7 +2437,7 @@ static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
int r;
if (adev->gfx.rlc.save_restore_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+ r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
@@ -2448,7 +2448,7 @@ static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
}
if (adev->gfx.rlc.clear_state_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
@@ -2459,7 +2459,7 @@ static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
}
if (adev->gfx.rlc.cp_table_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+ r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
@@ -3292,7 +3292,7 @@ static int gfx_v6_0_sw_init(void *handle)
ring->me = 1;
ring->pipe = i;
ring->queue = i;
- sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
+ sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
r = amdgpu_ring_init(adev, ring, 1024,
&adev->gfx.eop_irq, irq_type);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 8a8bc2f..ee2f213 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1935,7 +1935,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
INDEX_STRIDE, 3);
mutex_lock(&adev->srbm_mutex);
- for (i = 0; i < adev->vm_manager.num_ids; i++) {
+ for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
if (i == 0)
sh_mem_base = 0;
else
@@ -2792,7 +2792,7 @@ static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
if (ring->mqd_obj) {
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ r = amdgpu_bo_reserve(ring->mqd_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
@@ -2810,7 +2810,7 @@ static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
int r;
if (adev->gfx.mec.hpd_eop_obj) {
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
+ r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
@@ -3359,7 +3359,7 @@ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
/* save restore block */
if (adev->gfx.rlc.save_restore_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+ r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
@@ -3371,7 +3371,7 @@ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
/* clear state block */
if (adev->gfx.rlc.clear_state_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
@@ -3383,7 +3383,7 @@ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
/* clear state block */
if (adev->gfx.rlc.cp_table_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+ r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index dad8a4c..758d636 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1239,7 +1239,7 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
/* clear state block */
if (adev->gfx.rlc.clear_state_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
@@ -1250,7 +1250,7 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
/* jump table block */
if (adev->gfx.rlc.cp_table_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+ r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
@@ -1363,7 +1363,7 @@ static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
int r;
if (adev->gfx.mec.hpd_eop_obj) {
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
+ r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
@@ -1490,7 +1490,7 @@ static int gfx_v8_0_kiq_init(struct amdgpu_device *adev)
memset(hpd, 0, MEC_HPD_SIZE);
- r = amdgpu_bo_reserve(kiq->eop_obj, false);
+ r = amdgpu_bo_reserve(kiq->eop_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
amdgpu_bo_kunmap(kiq->eop_obj);
@@ -1932,6 +1932,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
case 0xca:
case 0xce:
case 0x88:
+ case 0xe6:
/* B6 */
adev->gfx.config.max_cu_per_sh = 6;
break;
@@ -1964,17 +1965,28 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.max_backends_per_se = 1;
switch (adev->pdev->revision) {
+ case 0x80:
+ case 0x81:
case 0xc0:
case 0xc1:
case 0xc2:
case 0xc4:
case 0xc8:
case 0xc9:
+ case 0xd6:
+ case 0xda:
+ case 0xe9:
+ case 0xea:
adev->gfx.config.max_cu_per_sh = 3;
break;
+ case 0x83:
case 0xd0:
case 0xd1:
case 0xd2:
+ case 0xd4:
+ case 0xdb:
+ case 0xe1:
+ case 0xe2:
default:
adev->gfx.config.max_cu_per_sh = 2;
break;
@@ -3890,7 +3902,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
INDEX_STRIDE, 3);
mutex_lock(&adev->srbm_mutex);
- for (i = 0; i < adev->vm_manager.num_ids; i++) {
+ for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
vi_srbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
if (i == 0) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index a447b708..0c16b75 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -39,7 +39,6 @@
#define GFX9_NUM_GFX_RINGS 1
#define GFX9_NUM_COMPUTE_RINGS 8
-#define GFX9_NUM_SE 4
#define RLCG_UCODE_LOADING_START_ADDRESS 0x2000
MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
@@ -453,7 +452,7 @@ static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
int r;
if (adev->gfx.mec.hpd_eop_obj) {
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
+ r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
@@ -463,7 +462,7 @@ static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
adev->gfx.mec.hpd_eop_obj = NULL;
}
if (adev->gfx.mec.mec_fw_obj) {
- r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, false);
+ r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve mec firmware bo failed\n", r);
amdgpu_bo_unpin(adev->gfx.mec.mec_fw_obj);
@@ -599,7 +598,7 @@ static int gfx_v9_0_kiq_init(struct amdgpu_device *adev)
memset(hpd, 0, MEC_HPD_SIZE);
- r = amdgpu_bo_reserve(kiq->eop_obj, false);
+ r = amdgpu_bo_reserve(kiq->eop_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
amdgpu_bo_kunmap(kiq->eop_obj);
@@ -631,7 +630,6 @@ static int gfx_v9_0_kiq_init_ring(struct amdgpu_device *adev,
ring->pipe = 1;
}
- irq->data = ring;
ring->queue = 0;
ring->eop_gpu_addr = kiq->eop_gpu_addr;
sprintf(ring->name, "kiq %d.%d.%d", ring->me, ring->pipe, ring->queue);
@@ -647,7 +645,6 @@ static void gfx_v9_0_kiq_free_ring(struct amdgpu_ring *ring,
{
amdgpu_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
amdgpu_ring_fini(ring);
- irq->data = NULL;
}
/* create MQD for each compute queue */
@@ -705,19 +702,19 @@ static void gfx_v9_0_compute_mqd_sw_fini(struct amdgpu_device *adev)
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_IND_INDEX),
+ WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(address << SQ_IND_INDEX__INDEX__SHIFT) |
(SQ_IND_INDEX__FORCE_READ_MASK));
- return RREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_IND_DATA));
+ return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}
static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
uint32_t wave, uint32_t thread,
uint32_t regno, uint32_t num, uint32_t *out)
{
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_IND_INDEX),
+ WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(regno << SQ_IND_INDEX__INDEX__SHIFT) |
@@ -725,7 +722,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
(SQ_IND_INDEX__FORCE_READ_MASK) |
(SQ_IND_INDEX__AUTO_INCR_MASK));
while (num--)
- *(out++) = RREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_IND_DATA));
+ *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}
static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
@@ -774,7 +771,6 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
adev->gfx.config.max_shader_engines = 4;
- adev->gfx.config.max_tile_pipes = 8; //??
adev->gfx.config.max_cu_per_sh = 16;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 4;
@@ -787,6 +783,8 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+ adev->gfx.config.gs_vgt_table_depth = 32;
+ adev->gfx.config.gs_prim_buffer_depth = 1792;
gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
break;
default:
@@ -801,6 +799,10 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.gb_addr_config,
GB_ADDR_CONFIG,
NUM_PIPES);
+
+ adev->gfx.config.max_tile_pipes =
+ adev->gfx.config.gb_addr_config_fields.num_pipes;
+
adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
REG_GET_FIELD(
adev->gfx.config.gb_addr_config,
@@ -841,7 +843,7 @@ static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
}
size_se = size_se ? size_se : default_size_se;
- ngg_buf->size = size_se * GFX9_NUM_SE;
+ ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
&ngg_buf->bo,
@@ -888,7 +890,7 @@ static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
adev->gfx.ngg.gds_reserve_addr += adev->gds.mem.gfx_partition_size;
/* Primitive Buffer */
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[PRIM],
+ r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
amdgpu_prim_buf_per_se,
64 * 1024);
if (r) {
@@ -897,7 +899,7 @@ static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
}
/* Position Buffer */
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[POS],
+ r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
amdgpu_pos_buf_per_se,
256 * 1024);
if (r) {
@@ -906,7 +908,7 @@ static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
}
/* Control Sideband */
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[CNTL],
+ r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
amdgpu_cntl_sb_buf_per_se,
256);
if (r) {
@@ -918,7 +920,7 @@ static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
if (amdgpu_param_buf_per_se <= 0)
goto out;
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[PARAM],
+ r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
amdgpu_param_buf_per_se,
512 * 1024);
if (r) {
@@ -947,47 +949,47 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
/* Program buffer size */
data = 0;
- size = adev->gfx.ngg.buf[PRIM].size / 256;
+ size = adev->gfx.ngg.buf[NGG_PRIM].size / 256;
data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, size);
- size = adev->gfx.ngg.buf[POS].size / 256;
+ size = adev->gfx.ngg.buf[NGG_POS].size / 256;
data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, size);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmWD_BUF_RESOURCE_1), data);
+ WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
data = 0;
- size = adev->gfx.ngg.buf[CNTL].size / 256;
+ size = adev->gfx.ngg.buf[NGG_CNTL].size / 256;
data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, size);
- size = adev->gfx.ngg.buf[PARAM].size / 1024;
+ size = adev->gfx.ngg.buf[NGG_PARAM].size / 1024;
data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, size);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmWD_BUF_RESOURCE_2), data);
+ WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
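/*
 * WD_BUF_RESOURCE_1/2 take buffer sizes in 256-byte granules, except
 * the parameter cache, which is in 1024-byte granules (hence the /256
 * and /1024 divisors above).  Quick worked example, assuming a
 * 256 KiB position buffer:
 */
uint32_t pos_buf_size_field = (256 * 1024) / 256;	/* = 1024 granules */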
/* Program buffer base address */
- base = lower_32_bits(adev->gfx.ngg.buf[PRIM].gpu_addr);
+ base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmWD_INDEX_BUF_BASE), data);
+ WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);
- base = upper_32_bits(adev->gfx.ngg.buf[PRIM].gpu_addr);
+ base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmWD_INDEX_BUF_BASE_HI), data);
+ WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);
- base = lower_32_bits(adev->gfx.ngg.buf[POS].gpu_addr);
+ base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmWD_POS_BUF_BASE), data);
+ WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);
- base = upper_32_bits(adev->gfx.ngg.buf[POS].gpu_addr);
+ base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmWD_POS_BUF_BASE_HI), data);
+ WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);
- base = lower_32_bits(adev->gfx.ngg.buf[CNTL].gpu_addr);
+ base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmWD_CNTL_SB_BUF_BASE), data);
+ WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);
- base = upper_32_bits(adev->gfx.ngg.buf[CNTL].gpu_addr);
+ base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI), data);
+ WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);
/* Clear GDS reserved memory */
r = amdgpu_ring_alloc(ring, 17);
@@ -1096,7 +1098,7 @@ static int gfx_v9_0_sw_init(void *handle)
ring->pipe = i / 8;
ring->queue = i % 8;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE);
- sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
+ sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
@@ -1203,7 +1205,7 @@ static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
}
- WREG32( SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data);
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}
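/*
 * REG_SET_FIELD()/REG_GET_FIELD() pack and unpack named bitfields via
 * the generated <REG>__<FIELD>_MASK/__SHIFT constants; the underlying
 * idiom is plain mask-and-shift (generic sketch):
 */
#define FIELD_GET(val, mask, shift)	(((val) & (mask)) >> (shift))
#define FIELD_SET(val, mask, shift, x)	\
	(((val) & ~(mask)) | (((x) << (shift)) & (mask)))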
static u32 gfx_v9_0_create_bitmask(u32 bit_width)
@@ -1215,8 +1217,8 @@ static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
- data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCC_RB_BACKEND_DISABLE));
- data |= RREG32(SOC15_REG_OFFSET(GC, 0, mmGC_USER_RB_BACKEND_DISABLE));
+ data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
+ data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
@@ -1276,8 +1278,8 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
soc15_grbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
+ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
+ WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
}
soc15_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
@@ -1304,8 +1306,8 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
tmp = 0;
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), tmp);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), 0);
+ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
+ WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
}
soc15_grbm_select(adev, 0, 0, 0, 0);
@@ -1320,7 +1322,7 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
*/
gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_FIFO_SIZE),
+ WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
(adev->gfx.config.sc_prim_fifo_size_frontend <<
PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
(adev->gfx.config.sc_prim_fifo_size_backend <<
@@ -1343,7 +1345,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
for (k = 0; k < adev->usec_timeout; k++) {
- if (RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY)) == 0)
+ if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
udelay(1);
}
@@ -1357,7 +1359,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
for (k = 0; k < adev->usec_timeout; k++) {
- if ((RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY)) & mask) == 0)
+ if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
break;
udelay(1);
}
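/*
 * Both waits above follow the driver's standard bounded-poll idiom:
 * spin in 1 us steps for at most adev->usec_timeout iterations.
 * Factored into a standalone helper it would look roughly like this
 * (sketch, not part of the patch):
 */
static int poll_reg_bits_clear(struct amdgpu_device *adev,
			       uint32_t offset, uint32_t mask)
{
	int k;

	for (k = 0; k < adev->usec_timeout; k++) {
		if (!(RREG32(offset) & mask))
			return 0;	/* busy bits cleared in time */
		udelay(1);
	}
	return -ETIMEDOUT;		/* still busy after the timeout */
}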
@@ -1366,7 +1368,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
bool enable)
{
- u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
+ u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
if (enable)
return;
@@ -1376,15 +1378,15 @@ static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), tmp);
+ WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}
void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL));
+ u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL), tmp);
+ WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);
gfx_v9_0_enable_gui_idle_interrupt(adev, false);
@@ -1415,17 +1417,17 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
#ifdef AMDGPU_RLC_DEBUG_RETRY
/* RLC_GPM_GENERAL_6 : RLC Ucode version */
- rlc_ucode_ver = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_GENERAL_6));
+ rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
if (rlc_ucode_ver == 0x108) {
DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
rlc_ucode_ver, adev->gfx.rlc_fw_version);
/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
* default is 0x9C4 to create a 100us interval */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_TIMER_INT_3), 0x9C4);
+ WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
* to disable the page fault retry interrupts, default is
* 0x100 (256) */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_GENERAL_12), 0x100);
+ WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
}
#endif
}
@@ -1446,11 +1448,11 @@ static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR),
+ WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
RLCG_UCODE_LOADING_START_ADDRESS);
for (i = 0; i < fw_size; i++)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA), le32_to_cpup(fw_data++));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR), adev->gfx.rlc_fw_version);
+ WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
return 0;
}
@@ -1465,10 +1467,10 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
gfx_v9_0_rlc_stop(adev);
/* disable CG */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL), 0);
+ WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
/* disable PG */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), 0);
+ WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
gfx_v9_0_rlc_reset(adev);
@@ -1487,7 +1489,7 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
int i;
- u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL));
+ u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
@@ -1496,7 +1498,7 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].ready = false;
}
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL), tmp);
+ WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
udelay(50);
}
@@ -1529,30 +1531,30 @@ static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
(adev->gfx.pfp_fw->data +
le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR), 0);
+ WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA), le32_to_cpup(fw_data++));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR), adev->gfx.pfp_fw_version);
+ WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
/* CE */
fw_data = (const __le32 *)
(adev->gfx.ce_fw->data +
le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR), 0);
+ WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA), le32_to_cpup(fw_data++));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR), adev->gfx.ce_fw_version);
+ WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
/* ME */
fw_data = (const __le32 *)
(adev->gfx.me_fw->data +
le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_RAM_WADDR), 0);
+ WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
for (i = 0; i < fw_size; i++)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_RAM_DATA), le32_to_cpup(fw_data++));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_RAM_WADDR), adev->gfx.me_fw_version);
+ WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+ WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
return 0;
}
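/*
 * PFP, CE and ME are all loaded with the same three-step sequence:
 * reset the ucode address register to 0, stream the firmware dwords
 * through the data register, then write the firmware version back to
 * the address register.  Factored out, the pattern is (sketch only;
 * the register pair differs per front end):
 */
static void gfx_upload_ucode(uint32_t addr_reg, uint32_t data_reg,
			     const __le32 *fw_data, uint32_t fw_size_dw,
			     uint32_t fw_version)
{
	uint32_t i;

	WREG32(addr_reg, 0);
	for (i = 0; i < fw_size_dw; i++)
		WREG32(data_reg, le32_to_cpup(fw_data++));
	WREG32(addr_reg, fw_version);
}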
@@ -1594,8 +1596,8 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
int r, i;
/* init the CP */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MAX_CONTEXT), adev->gfx.config.max_hw_contexts - 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_DEVICE_ID), 1);
+ WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
+ WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
gfx_v9_0_cp_gfx_enable(adev, true);
@@ -1650,10 +1652,10 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
u64 rb_addr, rptr_addr, wptr_gpu_addr;
/* Set the write pointer delay */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_DELAY), 0);
+ WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
/* set the RB to use vmid 0 */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_VMID), 0);
+ WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
/* Set ring buffer size */
ring = &adev->gfx.gfx_ring[0];
@@ -1663,30 +1665,30 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
#endif
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_CNTL), tmp);
+ WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
/* Initialize the ring buffer's write pointers */
ring->wptr = 0;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_WPTR), lower_32_bits(ring->wptr));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_WPTR_HI), upper_32_bits(ring->wptr));
+ WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
/* set the wb address whether it's enabled or not */
rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_RPTR_ADDR), lower_32_bits(rptr_addr));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_RPTR_ADDR_HI), upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
+ WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
+ WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO), lower_32_bits(wptr_gpu_addr));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI), upper_32_bits(wptr_gpu_addr));
+ WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
+ WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
mdelay(1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_CNTL), tmp);
+ WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
rb_addr = ring->gpu_addr >> 8;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_BASE), rb_addr);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_BASE_HI), upper_32_bits(rb_addr));
+ WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
+ WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_DOORBELL_CONTROL));
+ tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
if (ring->use_doorbell) {
tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
DOORBELL_OFFSET, ring->doorbell_index);
@@ -1695,13 +1697,13 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
} else {
tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
}
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_DOORBELL_CONTROL), tmp);
+ WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
DOORBELL_RANGE_LOWER, ring->doorbell_index);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER), tmp);
+ WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER),
+ WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
@@ -1717,9 +1719,9 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
int i;
if (enable) {
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_CNTL), 0);
+ WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
} else {
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_CNTL),
+ WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
for (i = 0; i < adev->gfx.num_compute_rings; i++)
adev->gfx.compute_ring[i].ready = false;
@@ -1756,21 +1758,21 @@ static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
tmp = 0;
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_IC_BASE_CNTL), tmp);
+ WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_IC_BASE_LO),
+ WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_IC_BASE_HI),
+ WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
/* MEC1 */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR),
+ WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
mec_hdr->jt_offset);
for (i = 0; i < mec_hdr->jt_size; i++)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA),
+ WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR),
+ WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
adev->gfx.mec_fw_version);
/* TODO: Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
@@ -1785,7 +1787,7 @@ static void gfx_v9_0_cp_compute_fini(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
if (ring->mqd_obj) {
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ r = amdgpu_bo_reserve(ring->mqd_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
@@ -1823,12 +1825,12 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
/* tell RLC which is KIQ queue */
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
+ tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), tmp);
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
tmp |= 0x80;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), tmp);
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
}
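/*
 * RLC_CP_SCHEDULERS takes the KIQ's queue id as (me << 5) | (pipe << 3)
 * | queue in the low byte, with bit 7 then set to latch it.  Worked
 * example, assuming the KIQ lands on ME2, pipe 0, queue 0:
 */
uint32_t kiq_queue_id = (2 << 5) | (0 << 3) | 0;	/* = 0x40 */
/* the second write ORs in 0x80, leaving 0xc0 in the low byte */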
static void gfx_v9_0_kiq_enable(struct amdgpu_ring *ring)
@@ -1898,14 +1900,14 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_CONTROL));
+ tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
(order_base_2(MEC_HPD_SIZE / 4) - 1));
mqd->cp_hqd_eop_control = tmp;
/* enable doorbell? */
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL));
+ tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
if (ring->use_doorbell) {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
@@ -1935,7 +1937,7 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
/* set MQD vmid to 0 */
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MQD_CONTROL));
+ tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
mqd->cp_mqd_control = tmp;
@@ -1945,7 +1947,7 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
/* set up the HQD, this is similar to CP_RB0_CNTL */
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_CONTROL));
+ tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(ring->ring_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
@@ -1973,7 +1975,7 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
tmp = 0;
/* enable the doorbell if requested */
if (ring->use_doorbell) {
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL));
+ tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_OFFSET, ring->doorbell_index);
@@ -1989,15 +1991,20 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
ring->wptr = 0;
- mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
+ mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
/* set the vmid for the queue */
mqd->cp_hqd_vmid = 0;
- tmp = RREG32(mmCP_HQD_PERSISTENT_STATE);
+ tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
mqd->cp_hqd_persistent_state = tmp;
+ /* set MIN_IB_AVAIL_SIZE */
+ tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
+ mqd->cp_hqd_ib_control = tmp;
+
/* activate the queue */
mqd->cp_hqd_active = 1;
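/*
 * Worked example for the EOP_SIZE encoding above, assuming (for
 * illustration) MEC_HPD_SIZE == 2048 bytes: the buffer holds
 * 2048 / 4 = 512 dwords, so the field is order_base_2(512) - 1 = 8,
 * and the hardware decodes 2^(8 + 1) = 512 dwords, i.e. exactly the
 * full buffer.
 */
uint32_t eop_size_field = order_base_2(2048 / 4) - 1;	/* = 8 */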
@@ -2013,94 +2020,94 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
/* disable wptr polling */
WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_BASE_ADDR),
+ WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
mqd->cp_hqd_eop_base_addr_lo);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI),
+ WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
mqd->cp_hqd_eop_base_addr_hi);
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_CONTROL),
+ WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
mqd->cp_hqd_eop_control);
/* enable doorbell? */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
mqd->cp_hqd_pq_doorbell_control);
/* disable the queue if it's active */
- if (RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)) & 1) {
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), 1);
+ if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
+ WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
for (j = 0; j < adev->usec_timeout; j++) {
- if (!(RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)) & 1))
+ if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
break;
udelay(1);
}
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST),
+ WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
mqd->cp_hqd_dequeue_request);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_RPTR),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
mqd->cp_hqd_pq_rptr);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
mqd->cp_hqd_pq_wptr_lo);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
mqd->cp_hqd_pq_wptr_hi);
}
/* set the pointer to the MQD */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR),
+ WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
mqd->cp_mqd_base_addr_lo);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR_HI),
+ WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
mqd->cp_mqd_base_addr_hi);
/* set MQD vmid to 0 */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MQD_CONTROL),
+ WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
mqd->cp_mqd_control);
/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
mqd->cp_hqd_pq_base_lo);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
mqd->cp_hqd_pq_base_hi);
/* set up the HQD, this is similar to CP_RB0_CNTL */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_CONTROL),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
mqd->cp_hqd_pq_control);
/* set the wb address whether it's enabled or not */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
mqd->cp_hqd_pq_rptr_report_addr_lo);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
mqd->cp_hqd_pq_rptr_report_addr_hi);
/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
mqd->cp_hqd_pq_wptr_poll_addr_lo);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
mqd->cp_hqd_pq_wptr_poll_addr_hi);
/* enable the doorbell if requested */
if (ring->use_doorbell) {
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER),
+ WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
(AMDGPU_DOORBELL64_KIQ *2) << 2);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER),
+ WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
(AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
}
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
mqd->cp_hqd_pq_doorbell_control);
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
mqd->cp_hqd_pq_wptr_lo);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
mqd->cp_hqd_pq_wptr_hi);
/* set the vmid for the queue */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_VMID), mqd->cp_hqd_vmid);
+ WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PERSISTENT_STATE),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
mqd->cp_hqd_persistent_state);
/* activate the queue */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE),
+ WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
mqd->cp_hqd_active);
if (ring->use_doorbell)
@@ -2323,7 +2330,7 @@ static bool gfx_v9_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (REG_GET_FIELD(RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS)),
+ if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
GRBM_STATUS, GUI_ACTIVE))
return false;
else
@@ -2338,7 +2345,7 @@ static int gfx_v9_0_wait_for_idle(void *handle)
for (i = 0; i < adev->usec_timeout; i++) {
/* read GRBM_STATUS */
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS)) &
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
GRBM_STATUS__GUI_ACTIVE_MASK;
if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
@@ -2355,7 +2362,7 @@ static int gfx_v9_0_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* GRBM_STATUS */
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS));
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
@@ -2374,7 +2381,7 @@ static int gfx_v9_0_soft_reset(void *handle)
}
/* GRBM_STATUS2 */
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2));
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
@@ -2391,17 +2398,17 @@ static int gfx_v9_0_soft_reset(void *handle)
gfx_v9_0_cp_compute_enable(adev, false);
if (grbm_soft_reset) {
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET));
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
tmp |= grbm_soft_reset;
dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET), tmp);
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET));
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
udelay(50);
tmp &= ~grbm_soft_reset;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET), tmp);
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET));
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
}
/* Wait a little for things to settle down */
@@ -2415,9 +2422,9 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
uint64_t clock;
mutex_lock(&adev->gfx.gpu_clock_mutex);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT), 1);
- clock = (uint64_t)RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB)) |
- ((uint64_t)RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB)) << 32ULL);
+ WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
mutex_unlock(&adev->gfx.gpu_clock_mutex);
return clock;
}
@@ -2497,7 +2504,7 @@ static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
return;
/* if RLC is not enabled, do nothing */
- rlc_setting = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL));
+ rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
return;
@@ -2506,7 +2513,7 @@ static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
AMD_CG_SUPPORT_GFX_3D_CGCG)) {
data = RLC_SAFE_MODE__CMD_MASK;
data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SAFE_MODE), data);
+ WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
/* wait for RLC_SAFE_MODE */
for (i = 0; i < adev->usec_timeout; i++) {
@@ -2526,7 +2533,7 @@ static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
return;
/* if RLC is not enabled, do nothing */
- rlc_setting = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL));
+ rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
return;
@@ -2537,7 +2544,7 @@ static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
* mode.
*/
data = RLC_SAFE_MODE__CMD_MASK;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SAFE_MODE), data);
+ WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
adev->gfx.rlc.in_safe_mode = false;
}
}
@@ -2550,7 +2557,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
- def = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
+ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
@@ -2560,48 +2567,48 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
if (def != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE), data);
+ WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
/* MGLS is a global flag to control all MGLS in GFX */
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
/* 2 - RLC memory Light sleep */
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
- def = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
+ def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
if (def != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL), data);
+ WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
}
/* 3 - CP memory Light sleep */
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
- def = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
+ def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
if (def != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL), data);
+ WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
}
}
} else {
/* 1 - MGCG_OVERRIDE */
- def = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
+ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
if (def != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE), data);
+ WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
/* 2 - disable MGLS in RLC */
- data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
+ data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL), data);
+ WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
}
/* 3 - disable MGLS in CP */
- data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
+ data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL), data);
+ WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
}
}
}
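/*
 * Every clock-gating toggle above uses the same "write only if
 * changed" idiom to avoid redundant MMIO traffic: read the register
 * into both def and data, edit data, and write back only on a
 * difference.  Generic sketch:
 */
static void rmw_reg_if_changed(struct amdgpu_device *adev, uint32_t offset,
			       uint32_t clear_mask, uint32_t set_mask)
{
	uint32_t def, data;

	def = data = RREG32(offset);
	data &= ~clear_mask;
	data |= set_mask;
	if (def != data)	/* skip the write when nothing changed */
		WREG32(offset, data);
}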
@@ -2616,37 +2623,37 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
/* Enable 3D CGCG/CGLS */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
/* write cmd to clear cgcg/cgls ov */
- def = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
+ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
/* unset CGCG override */
data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
/* update CGCG and CGLS override bits */
if (def != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE), data);
+ WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
/* enable 3D cgcg FSM (0x0020003f) */
- def = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
+ def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
data = (0x2000 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
if (def != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D), data);
+ WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
/* set IDLE_POLL_COUNT(0x00900100) */
- def = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
+ def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
if (def != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
+ WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
} else {
/* Disable CGCG/CGLS */
- def = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
+ def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
/* disable cgcg, cgls should be disabled */
data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
/* disable cgcg and cgls in FSM */
if (def != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D), data);
+ WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
}
adev->gfx.rlc.funcs->exit_safe_mode(adev);
@@ -2660,7 +2667,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
adev->gfx.rlc.funcs->enter_safe_mode(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
- def = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
+ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
/* unset CGCG override */
data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
@@ -2669,31 +2676,31 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
/* update CGCG and CGLS override bits */
if (def != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE), data);
+ WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
/* enable cgcg FSM (0x0020003F) */
- def = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
+ def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
if (def != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL), data);
+ WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
/* set IDLE_POLL_COUNT(0x00900100) */
- def = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
+ def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
if (def != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
+ WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
} else {
- def = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
+ def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
/* reset CGCG/CGLS bits */
data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
/* disable cgcg and cgls in FSM */
if (def != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL), data);
+ WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
}
adev->gfx.rlc.funcs->exit_safe_mode(adev);
@@ -2740,6 +2747,9 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
switch (adev->asic_type) {
case CHIP_VEGA10:
gfx_v9_0_update_gfx_clock_gating(adev,
@@ -2760,12 +2770,12 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
*flags = 0;
/* AMD_CG_SUPPORT_GFX_MGCG */
- data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
+ data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
*flags |= AMD_CG_SUPPORT_GFX_MGCG;
/* AMD_CG_SUPPORT_GFX_CGCG */
- data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
+ data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CGCG;
@@ -2774,17 +2784,17 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
*flags |= AMD_CG_SUPPORT_GFX_CGLS;
/* AMD_CG_SUPPORT_GFX_RLC_LS */
- data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
+ data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
/* AMD_CG_SUPPORT_GFX_CP_LS */
- data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
+ data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
/* AMD_CG_SUPPORT_GFX_3D_CGCG */
- data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
+ data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
@@ -2807,8 +2817,8 @@ static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
if (ring->use_doorbell) {
wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
} else {
- wptr = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_WPTR));
- wptr += (u64)RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_WPTR_HI)) << 32;
+ wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
+ wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
}
return wptr;
@@ -2823,8 +2833,8 @@ static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
WDOORBELL64(ring->doorbell_index, ring->wptr);
} else {
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_WPTR), lower_32_bits(ring->wptr));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_WPTR_HI), upper_32_bits(ring->wptr));
+ WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
}
}
@@ -2956,35 +2966,29 @@ static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
- unsigned eng = ring->idx;
- unsigned i;
+ unsigned eng = ring->vm_inv_eng;
pd_addr = pd_addr | 0x1; /* valid bit */
/* now only use physical base address of PDE and valid */
BUG_ON(pd_addr & 0xFFFF00000000003EULL);
- for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
-
- gfx_v9_0_write_data_to_reg(ring, usepfp, true,
- hub->ctx0_ptb_addr_lo32
- + (2 * vm_id),
- lower_32_bits(pd_addr));
+ gfx_v9_0_write_data_to_reg(ring, usepfp, true,
+ hub->ctx0_ptb_addr_lo32 + (2 * vm_id),
+ lower_32_bits(pd_addr));
- gfx_v9_0_write_data_to_reg(ring, usepfp, true,
- hub->ctx0_ptb_addr_hi32
- + (2 * vm_id),
- upper_32_bits(pd_addr));
+ gfx_v9_0_write_data_to_reg(ring, usepfp, true,
+ hub->ctx0_ptb_addr_hi32 + (2 * vm_id),
+ upper_32_bits(pd_addr));
- gfx_v9_0_write_data_to_reg(ring, usepfp, true,
- hub->vm_inv_eng0_req + eng, req);
+ gfx_v9_0_write_data_to_reg(ring, usepfp, true,
+ hub->vm_inv_eng0_req + eng, req);
- /* wait for the invalidate to complete */
- gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, hub->vm_inv_eng0_ack +
- eng, 0, 1 << vm_id, 1 << vm_id, 0x20);
- }
+ /* wait for the invalidate to complete */
+ gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, hub->vm_inv_eng0_ack +
+ eng, 0, 1 << vm_id, 1 << vm_id, 0x20);
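/*
 * With per-ring invalidate engines, each ring now flushes only its own
 * vmhub instead of looping over all of them.  The WAIT_REG_MEM emitted
 * above stalls the CP until the per-VMID ack bit is set; the CPU-side
 * equivalent of that wait would be roughly (sketch):
 */
while ((RREG32(hub->vm_inv_eng0_ack + eng) & (1 << vm_id)) != (1 << vm_id))
	cpu_relax();	/* invalidate for this VMID not yet acknowledged */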
/* compute doesn't have PFP */
if (usepfp) {
@@ -3373,9 +3377,7 @@ static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
uint32_t tmp, target;
- struct amdgpu_ring *ring = (struct amdgpu_ring *)src->data;
-
- BUG_ON(!ring || (ring->funcs->type != AMDGPU_RING_TYPE_KIQ));
+ struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
if (ring->me == 1)
target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
@@ -3386,20 +3388,20 @@ static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
switch (type) {
case AMDGPU_CP_KIQ_IRQ_DRIVER0:
if (state == AMDGPU_IRQ_STATE_DISABLE) {
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL));
+ tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
GENERIC2_INT_ENABLE, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL), tmp);
+ WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
tmp = RREG32(target);
tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
GENERIC2_INT_ENABLE, 0);
WREG32(target, tmp);
} else {
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL));
+ tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
GENERIC2_INT_ENABLE, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL), tmp);
+ WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
tmp = RREG32(target);
tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
@@ -3419,9 +3421,7 @@ static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
u8 me_id, pipe_id, queue_id;
- struct amdgpu_ring *ring = (struct amdgpu_ring *)source->data;
-
- BUG_ON(!ring || (ring->funcs->type != AMDGPU_RING_TYPE_KIQ));
+ struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
me_id = (entry->ring_id & 0x0c) >> 2;
pipe_id = (entry->ring_id & 0x03) >> 0;
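/*
 * Worked example of the ring_id decode above, for a hypothetical
 * entry->ring_id of 0x06:
 */
u8 example_me_id   = (0x06 & 0x0c) >> 2;	/* = 1 */
u8 example_pipe_id = (0x06 & 0x03) >> 0;	/* = 2 */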
@@ -3456,13 +3456,14 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
+ .vmhub = AMDGPU_GFXHUB,
.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
.emit_frame_size = /* totally 242 maximum if 16 IBs */
5 + /* COND_EXEC */
7 + /* PIPELINE_SYNC */
- 46 + /* VM_FLUSH */
+ 24 + /* VM_FLUSH */
8 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
4 + /* double SWITCH_BUFFER,
@@ -3500,6 +3501,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
+ .vmhub = AMDGPU_GFXHUB,
.get_rptr = gfx_v9_0_ring_get_rptr_compute,
.get_wptr = gfx_v9_0_ring_get_wptr_compute,
.set_wptr = gfx_v9_0_ring_set_wptr_compute,
@@ -3508,7 +3510,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
7 + /* gfx_v9_0_ring_emit_hdp_flush */
5 + /* gfx_v9_0_ring_emit_hdp_invalidate */
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
- 64 + /* gfx_v9_0_ring_emit_vm_flush */
+ 24 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
@@ -3529,6 +3531,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
+ .vmhub = AMDGPU_GFXHUB,
.get_rptr = gfx_v9_0_ring_get_rptr_compute,
.get_wptr = gfx_v9_0_ring_get_wptr_compute,
.set_wptr = gfx_v9_0_ring_set_wptr_compute,
@@ -3537,7 +3540,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
7 + /* gfx_v9_0_ring_emit_hdp_flush */
5 + /* gfx_v9_0_ring_emit_hdp_invalidate */
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
- 64 + /* gfx_v9_0_ring_emit_vm_flush */
+ 24 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
@@ -3612,7 +3615,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
{
/* init asic gds info */
- adev->gds.mem.total_size = RREG32(SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE));
+ adev->gds.mem.total_size = RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
adev->gds.gws.total_size = 64;
adev->gds.oa.total_size = 16;
@@ -3641,8 +3644,8 @@ static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
- data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG));
- data |= RREG32(SOC15_REG_OFFSET(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG));
+ data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
+ data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
@@ -3763,25 +3766,25 @@ static int gfx_v9_0_init_queue(struct amdgpu_ring *ring)
eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring->queue * MEC_HPD_SIZE);
eop_gpu_addr >>= 8;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_BASE_ADDR), lower_32_bits(eop_gpu_addr));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI), upper_32_bits(eop_gpu_addr));
+ WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR, lower_32_bits(eop_gpu_addr));
+ WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr));
mqd->cp_hqd_eop_base_addr_lo = lower_32_bits(eop_gpu_addr);
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_gpu_addr);
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_CONTROL));
+ tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
(order_base_2(MEC_HPD_SIZE / 4) - 1));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_CONTROL), tmp);
+ WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL, tmp);
/* enable doorbell? */
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL));
+ tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
if (use_doorbell)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
else
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), tmp);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, tmp);
mqd->cp_hqd_pq_doorbell_control = tmp;
/* disable the queue if it's active */
@@ -3790,40 +3793,40 @@ static int gfx_v9_0_init_queue(struct amdgpu_ring *ring)
mqd->cp_hqd_pq_rptr = 0;
mqd->cp_hqd_pq_wptr_lo = 0;
mqd->cp_hqd_pq_wptr_hi = 0;
- if (RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)) & 1) {
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), 1);
+ if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
+ WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
for (j = 0; j < adev->usec_timeout; j++) {
- if (!(RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)) & 1))
+ if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
break;
udelay(1);
}
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), mqd->cp_hqd_dequeue_request);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_RPTR), mqd->cp_hqd_pq_rptr);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO), mqd->cp_hqd_pq_wptr_lo);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI), mqd->cp_hqd_pq_wptr_hi);
+ WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, mqd->cp_hqd_dequeue_request);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, mqd->cp_hqd_pq_rptr);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, mqd->cp_hqd_pq_wptr_lo);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, mqd->cp_hqd_pq_wptr_hi);
}
/* set the pointer to the MQD */
mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc;
mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR), mqd->cp_mqd_base_addr_lo);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR_HI), mqd->cp_mqd_base_addr_hi);
+ WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo);
+ WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
/* set MQD vmid to 0 */
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MQD_CONTROL));
+ tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MQD_CONTROL), tmp);
+ WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL, tmp);
mqd->cp_mqd_control = tmp;
/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
hqd_gpu_addr = ring->gpu_addr >> 8;
mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE), mqd->cp_hqd_pq_base_lo);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI), mqd->cp_hqd_pq_base_hi);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi);
/* set up the HQD, this is similar to CP_RB0_CNTL */
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_CONTROL));
+ tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(ring->ring_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
@@ -3835,7 +3838,7 @@ static int gfx_v9_0_init_queue(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_CONTROL), tmp);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL, tmp);
mqd->cp_hqd_pq_control = tmp;
/* set the wb address whether it's enabled or not */
@@ -3843,27 +3846,27 @@ static int gfx_v9_0_init_queue(struct amdgpu_ring *ring)
mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_rptr_report_addr_hi =
upper_32_bits(wb_gpu_addr) & 0xffff;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
mqd->cp_hqd_pq_rptr_report_addr_lo);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
mqd->cp_hqd_pq_rptr_report_addr_hi);
/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
mqd->cp_hqd_pq_wptr_poll_addr_lo);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
mqd->cp_hqd_pq_wptr_poll_addr_hi);
/* enable the doorbell if requested */
if (use_doorbell) {
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER),
+ WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
(AMDGPU_DOORBELL64_KIQ * 2) << 2);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER),
+ WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
(AMDGPU_DOORBELL64_MEC_RING7 * 2) << 2);
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL));
+ tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_OFFSET, ring->doorbell_index);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
@@ -3874,25 +3877,25 @@ static int gfx_v9_0_init_queue(struct amdgpu_ring *ring)
} else {
mqd->cp_hqd_pq_doorbell_control = 0;
}
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL),
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
mqd->cp_hqd_pq_doorbell_control);
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO), mqd->cp_hqd_pq_wptr_lo);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI), mqd->cp_hqd_pq_wptr_hi);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, mqd->cp_hqd_pq_wptr_lo);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, mqd->cp_hqd_pq_wptr_hi);
/* set the vmid for the queue */
mqd->cp_hqd_vmid = 0;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_VMID), mqd->cp_hqd_vmid);
+ WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PERSISTENT_STATE));
+ tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PERSISTENT_STATE), tmp);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, tmp);
mqd->cp_hqd_persistent_state = tmp;
/* activate the queue */
mqd->cp_hqd_active = 1;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), mqd->cp_hqd_active);
+ WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, mqd->cp_hqd_active);
soc15_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
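/*
 * Note the bracket this function runs in: the CP_HQD_* registers are
 * banked per queue, so programming them is always wrapped in an
 * srbm_mutex-protected grbm select/deselect pair.  The general shape
 * (sketch):
 */
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
/* ... program the CP_HQD_* registers for this queue ... */
soc15_grbm_select(adev, 0, 0, 0, 0);	/* back to the default bank */
mutex_unlock(&adev->srbm_mutex);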
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 631aef3..d860939 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -346,7 +346,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
* size equal to the default GTT size or VRAM, whichever is larger.
*/
if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
+ adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
+ adev->mc.mc_vram_size);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
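/*
 * Worked example of the default-GTT computation above, assuming (for
 * illustration) AMDGPU_DEFAULT_GTT_SIZE_MB == 3072 and a 2 GiB VRAM
 * part:
 */
uint64_t gtt_size = max(3072ULL << 20, 2048ULL << 20);	/* = 3 GiB */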
@@ -621,7 +622,7 @@ static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
- adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+ adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
adev->vm_manager.num_level = 1;
amdgpu_vm_manager_init(adev);
@@ -949,10 +950,6 @@ static int gmc_v6_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->vm_manager.enabled) {
- gmc_v6_0_vm_fini(adev);
- adev->vm_manager.enabled = false;
- }
gmc_v6_0_hw_fini(adev);
return 0;
@@ -967,16 +964,9 @@ static int gmc_v6_0_resume(void *handle)
if (r)
return r;
- if (!adev->vm_manager.enabled) {
- r = gmc_v6_0_vm_init(adev);
- if (r) {
- dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
- return r;
- }
- adev->vm_manager.enabled = true;
- }
+ amdgpu_vm_reset_all_ids(adev);
- return r;
+ return 0;
}
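/*
 * The VM manager now stays initialized across suspend; resume only has
 * to forget the VMIDs the hardware lost.  amdgpu_vm_reset_all_ids()
 * (added elsewhere in this series) walks every hub's id_mgr, roughly
 * (sketch):
 */
static void sketch_reset_all_ids(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vm_id_manager *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vm_reset_id(adev, i, j);	/* drop cached PD */
	}
}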
static bool gmc_v6_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 92abe12..2750e5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -395,7 +395,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
* size equal to 1024 MB or the VRAM size, whichever is larger.
*/
if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
+ adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
+ adev->mc.mc_vram_size);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
@@ -746,7 +747,7 @@ static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
- adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+ adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
adev->vm_manager.num_level = 1;
amdgpu_vm_manager_init(adev);
@@ -1116,10 +1117,6 @@ static int gmc_v7_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->vm_manager.enabled) {
- gmc_v7_0_vm_fini(adev);
- adev->vm_manager.enabled = false;
- }
gmc_v7_0_hw_fini(adev);
return 0;
@@ -1134,16 +1131,9 @@ static int gmc_v7_0_resume(void *handle)
if (r)
return r;
- if (!adev->vm_manager.enabled) {
- r = gmc_v7_0_vm_init(adev);
- if (r) {
- dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
- return r;
- }
- adev->vm_manager.enabled = true;
- }
+ amdgpu_vm_reset_all_ids(adev);
- return r;
+ return 0;
}
static bool gmc_v7_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index f2ccefc..f56b408 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -557,7 +557,8 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
* size equal to 1024 MB or the VRAM size, whichever is larger.
*/
if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
+ adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
+ adev->mc.mc_vram_size);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
@@ -949,7 +950,7 @@ static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
- adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+ adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
adev->vm_manager.num_level = 1;
amdgpu_vm_manager_init(adev);
@@ -1208,10 +1209,6 @@ static int gmc_v8_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->vm_manager.enabled) {
- gmc_v8_0_vm_fini(adev);
- adev->vm_manager.enabled = false;
- }
gmc_v8_0_hw_fini(adev);
return 0;
@@ -1226,16 +1223,9 @@ static int gmc_v8_0_resume(void *handle)
if (r)
return r;
- if (!adev->vm_manager.enabled) {
- r = gmc_v8_0_vm_init(adev);
- if (r) {
- dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
- return r;
- }
- adev->vm_manager.enabled = true;
- }
+ amdgpu_vm_reset_all_ids(adev);
- return r;
+ return 0;
}
static bool gmc_v8_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 3b045e0..f936332 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -386,6 +386,23 @@ static int gmc_v9_0_early_init(void *handle)
static int gmc_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
+ unsigned i;
+
+ for (i = 0; i < adev->num_rings; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ unsigned vmhub = ring->funcs->vmhub;
+
+ ring->vm_inv_eng = vm_inv_eng[vmhub]++;
+ dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
+ ring->idx, ring->name, ring->vm_inv_eng,
+ ring->funcs->vmhub);
+ }
+
+ /* Engine 17 is used for GART flushes */
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
+ BUG_ON(vm_inv_eng[i] > 17);
+
return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
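
For illustration, assuming a hypothetical Vega10 ring layout, the allocator
above hands out invalidation engines per hub like this (each hub counts
independently from 3, and engine 17 stays reserved for GART flushes, which
the BUG_ON enforces):

	/* Hypothetical assignment, for illustration only:
	 *   gfx ring      -> GFXHUB -> inv eng 3
	 *   compute ring0 -> GFXHUB -> inv eng 4
	 *   compute ring1 -> GFXHUB -> inv eng 5
	 *   sdma ring0    -> MMHUB  -> inv eng 3
	 *   sdma ring1    -> MMHUB  -> inv eng 4
	 *   uvd ring      -> MMHUB  -> inv eng 5
	 */
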
@@ -469,7 +486,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
* size equal to 1024 MB or the VRAM size, whichever is larger.
*/
if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
+ adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
+ adev->mc.mc_vram_size);
else
adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
@@ -519,7 +537,8 @@ static int gmc_v9_0_vm_init(struct amdgpu_device *adev)
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
- adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
+ adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
+ adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
/* TODO: fix num_level for APU when updating vm size and block size */
if (adev->flags & AMD_IS_APU)
@@ -772,10 +791,6 @@ static int gmc_v9_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->vm_manager.enabled) {
- gmc_v9_0_vm_fini(adev);
- adev->vm_manager.enabled = false;
- }
gmc_v9_0_hw_fini(adev);
return 0;
@@ -790,17 +805,9 @@ static int gmc_v9_0_resume(void *handle)
if (r)
return r;
- if (!adev->vm_manager.enabled) {
- r = gmc_v9_0_vm_init(adev);
- if (r) {
- dev_err(adev->dev,
- "vm manager initialization failed (%d).\n", r);
- return r;
- }
- adev->vm_manager.enabled = true;
- }
+ amdgpu_vm_reset_all_ids(adev);
- return r;
+ return 0;
}
static bool gmc_v9_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 6268451..dbfe48d 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -511,6 +511,9 @@ static int mmhub_v1_0_set_clockgating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
switch (adev->asic_type) {
case CHIP_VEGA10:
mmhub_v1_0_update_medium_grain_clock_gating(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
index 5f0fc8bf..8af0bdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
@@ -84,4 +84,61 @@ struct mmsch_v1_0_cmd_indirect_write {
uint32_t reg_value;
};
+static inline void mmsch_v1_0_insert_direct_wt(struct mmsch_v1_0_cmd_direct_write *direct_wt,
+ uint32_t *init_table,
+ uint32_t reg_offset,
+ uint32_t value)
+{
+ direct_wt->cmd_header.reg_offset = reg_offset;
+ direct_wt->reg_value = value;
+ memcpy((void *)init_table, direct_wt, sizeof(struct mmsch_v1_0_cmd_direct_write));
+}
+
+static inline void mmsch_v1_0_insert_direct_rd_mod_wt(struct mmsch_v1_0_cmd_direct_read_modify_write *direct_rd_mod_wt,
+ uint32_t *init_table,
+ uint32_t reg_offset,
+ uint32_t mask, uint32_t data)
+{
+ direct_rd_mod_wt->cmd_header.reg_offset = reg_offset;
+ direct_rd_mod_wt->mask_value = mask;
+ direct_rd_mod_wt->write_data = data;
+ memcpy((void *)init_table, direct_rd_mod_wt,
+ sizeof(struct mmsch_v1_0_cmd_direct_read_modify_write));
+}
+
+static inline void mmsch_v1_0_insert_direct_poll(struct mmsch_v1_0_cmd_direct_polling *direct_poll,
+ uint32_t *init_table,
+ uint32_t reg_offset,
+ uint32_t mask, uint32_t wait)
+{
+ direct_poll->cmd_header.reg_offset = reg_offset;
+ direct_poll->mask_value = mask;
+ direct_poll->wait_value = wait;
+ memcpy((void *)init_table, direct_poll, sizeof(struct mmsch_v1_0_cmd_direct_polling));
+}
+
+#define MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
+ mmsch_v1_0_insert_direct_rd_mod_wt(&direct_rd_mod_wt, \
+ init_table, (reg), \
+ (mask), (data)); \
+ init_table += sizeof(struct mmsch_v1_0_cmd_direct_read_modify_write)/4; \
+ table_size += sizeof(struct mmsch_v1_0_cmd_direct_read_modify_write)/4; \
+}
+
+#define MMSCH_V1_0_INSERT_DIRECT_WT(reg, value) { \
+ mmsch_v1_0_insert_direct_wt(&direct_wt, \
+ init_table, (reg), \
+ (value)); \
+ init_table += sizeof(struct mmsch_v1_0_cmd_direct_write)/4; \
+ table_size += sizeof(struct mmsch_v1_0_cmd_direct_write)/4; \
+}
+
+#define MMSCH_V1_0_INSERT_DIRECT_POLL(reg, mask, wait) { \
+ mmsch_v1_0_insert_direct_poll(&direct_poll, \
+ init_table, (reg), \
+ (mask), (wait)); \
+ init_table += sizeof(struct mmsch_v1_0_cmd_direct_polling)/4; \
+ table_size += sizeof(struct mmsch_v1_0_cmd_direct_polling)/4; \
+}
+
#endif
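
The insert helpers and wrapper macros assume the caller keeps init_table,
table_size and the command structs in local scope, which is exactly how
uvd_v7_0_sriov_start() uses them later in this patch. A minimal usage
sketch, trimmed from that pattern (adev assumed in scope):

	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;

	/* queue a register write, then poll until bit 1 of UVD_STATUS is set */
	MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
	MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02);
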
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 70a3dd1..7bdc51b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -368,9 +368,12 @@ static int xgpu_vi_mailbox_rcv_msg(struct amdgpu_device *adev,
u32 reg;
u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);
- reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
- if (!(reg & mask))
- return -ENOENT;
+ /* workaround: the host driver doesn't set VALID for the FLR CMPL message yet */
+ if (event != IDH_FLR_NOTIFICATION_CMPL) {
+ reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
+ if (!(reg & mask))
+ return -ENOENT;
+ }
reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
if (reg != event)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index c3588d1..60a6407 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -166,11 +166,8 @@ int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
{
int ret;
uint32_t psp_gfxdrv_command_reg = 0;
- struct amdgpu_bo *psp_sysdrv;
- void *psp_sysdrv_virt = NULL;
- uint64_t psp_sysdrv_mem;
struct amdgpu_device *adev = psp->adev;
- uint32_t size, sol_reg;
+ uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS
* have already been loaded.
@@ -185,27 +182,14 @@ int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
if (ret)
return ret;
- /*
- * Create a 1 meg GART memory to store the psp sys driver
- * binary with a 1 meg aligned address
- */
- size = (psp->sys_bin_size + (PSP_BOOTLOADER_1_MEG_ALIGNMENT - 1)) &
- (~(PSP_BOOTLOADER_1_MEG_ALIGNMENT - 1));
-
- ret = amdgpu_bo_create_kernel(adev, size, PSP_BOOTLOADER_1_MEG_ALIGNMENT,
- AMDGPU_GEM_DOMAIN_GTT,
- &psp_sysdrv,
- &psp_sysdrv_mem,
- &psp_sysdrv_virt);
- if (ret)
- return ret;
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
/* Copy PSP System Driver binary to memory */
- memcpy(psp_sysdrv_virt, psp->sys_start_addr, psp->sys_bin_size);
+ memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
/* Provide the sys driver to bootrom */
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_36),
- (uint32_t)(psp_sysdrv_mem >> 20));
+ (uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = 1 << 16;
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
psp_gfxdrv_command_reg);
@@ -216,8 +200,6 @@ int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
0x80000000, 0x80000000, false);
- amdgpu_bo_free_kernel(&psp_sysdrv, &psp_sysdrv_mem, &psp_sysdrv_virt);
-
return ret;
}
@@ -225,11 +207,8 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
{
int ret;
unsigned int psp_gfxdrv_command_reg = 0;
- struct amdgpu_bo *psp_sos;
- void *psp_sos_virt = NULL;
- uint64_t psp_sos_mem;
struct amdgpu_device *adev = psp->adev;
- uint32_t size, sol_reg;
+ uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS
* have already been loaded.
@@ -244,23 +223,14 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
if (ret)
return ret;
- size = (psp->sos_bin_size + (PSP_BOOTLOADER_1_MEG_ALIGNMENT - 1)) &
- (~((uint64_t)PSP_BOOTLOADER_1_MEG_ALIGNMENT - 1));
-
- ret = amdgpu_bo_create_kernel(adev, size, PSP_BOOTLOADER_1_MEG_ALIGNMENT,
- AMDGPU_GEM_DOMAIN_GTT,
- &psp_sos,
- &psp_sos_mem,
- &psp_sos_virt);
- if (ret)
- return ret;
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
/* Copy Secure OS binary to PSP memory */
- memcpy(psp_sos_virt, psp->sos_start_addr, psp->sos_bin_size);
+ memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
/* Provide the PSP secure OS to bootrom */
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_36),
- (uint32_t)(psp_sos_mem >> 20));
+ (uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = 2 << 16;
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
psp_gfxdrv_command_reg);
@@ -273,8 +243,6 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
0, true);
#endif
- amdgpu_bo_free_kernel(&psp_sos, &psp_sos_mem, &psp_sos_virt);
-
return ret;
}
@@ -300,7 +268,6 @@ int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd
int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
{
int ret = 0;
- unsigned int psp_ring_reg = 0;
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
@@ -320,6 +287,16 @@ int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
return ret;
}
+ return 0;
+}
+
+int psp_v3_1_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ unsigned int psp_ring_reg = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
/* Write low address of the ring to C2PMSG_69 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_69), psp_ring_reg);
@@ -344,6 +321,33 @@ int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
return ret;
}
+int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring;
+ unsigned int psp_ring_reg = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ ring = &psp->km_ring;
+
+ /* Write the ring destroy command to C2PMSG_64 */
+ psp_ring_reg = 3 << 16;
+ WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), psp_ring_reg);
+
+ /* there might be a handshake issue with the hardware which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+
+ if (ring->ring_mem)
+ amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+ &ring->ring_mem_mc_addr,
+ (void **)&ring->ring_mem);
+ return ret;
+}
+
int psp_v3_1_cmd_submit(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
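
With creation split out of psp_v3_1_ring_init(), the KM ring now has a
symmetric lifecycle. A hedged sketch of the expected call order (the real
wiring goes through the psp_funcs table in amdgpu_psp.c; error handling
elided):

	ret = psp_v3_1_ring_init(psp, PSP_RING_TYPE__KM);    /* allocate ring memory   */
	ret = psp_v3_1_ring_create(psp, PSP_RING_TYPE__KM);  /* hand the ring to PSP   */
	/* ... psp_v3_1_cmd_submit() per firmware image ... */
	ret = psp_v3_1_ring_destroy(psp, PSP_RING_TYPE__KM); /* stop ring, free memory */
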
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h
index e82eff7..9dcd0b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h
@@ -39,6 +39,10 @@ extern int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
struct psp_gfx_cmd_resp *cmd);
extern int psp_v3_1_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type);
+extern int psp_v3_1_ring_create(struct psp_context *psp,
+ enum psp_ring_type ring_type);
+extern int psp_v3_1_ring_destroy(struct psp_context *psp,
+ enum psp_ring_type ring_type);
extern int psp_v3_1_cmd_submit(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 21f38d8..ecc70a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -48,8 +48,7 @@ static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
-static const u32 golden_settings_sdma_4[] =
-{
+static const u32 golden_settings_sdma_4[] = {
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xff000ff0, 0x3f000100,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0100, 0x00000100,
@@ -76,8 +75,7 @@ static const u32 golden_settings_sdma_4[] =
SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_UTCL1_PAGE), 0x000003ff, 0x000003c0
};
-static const u32 golden_settings_sdma_vg10[] =
-{
+static const u32 golden_settings_sdma_vg10[] = {
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG), 0x0018773f, 0x00104002,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00104002,
SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG), 0x0018773f, 0x00104002,
@@ -87,16 +85,17 @@ static const u32 golden_settings_sdma_vg10[] =
static u32 sdma_v4_0_get_reg_offset(u32 instance, u32 internal_offset)
{
u32 base = 0;
+
switch (instance) {
- case 0:
- base = SDMA0_BASE.instance[0].segment[0];
- break;
- case 1:
- base = SDMA1_BASE.instance[0].segment[0];
- break;
- default:
- BUG();
- break;
+ case 0:
+ base = SDMA0_BASE.instance[0].segment[0];
+ break;
+ case 1:
+ base = SDMA1_BASE.instance[0].segment[0];
+ break;
+ default:
+ BUG();
+ break;
}
return base + internal_offset;
@@ -159,7 +158,8 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
case CHIP_VEGA10:
chip_name = "vega10";
break;
- default: BUG();
+ default:
+ BUG();
}
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -179,7 +179,7 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
if (adev->sdma.instance[i].feature_version >= 20)
adev->sdma.instance[i].burst_nop = true;
DRM_DEBUG("psp_load == '%s'\n",
- adev->firmware.load_type == AMDGPU_FW_LOAD_PSP? "true": "false");
+ adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -192,9 +192,7 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
}
out:
if (err) {
- printk(KERN_ERR
- "sdma_v4_0: Failed to load firmware \"%s\"\n",
- fw_name);
+ DRM_ERROR("sdma_v4_0: Failed to load firmware \"%s\"\n", fw_name);
for (i = 0; i < adev->sdma.num_instances; i++) {
release_firmware(adev->sdma.instance[i].fw);
adev->sdma.instance[i].fw = NULL;
@@ -212,10 +210,10 @@ out:
*/
static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{
- u64* rptr;
+ u64 *rptr;
/* XXX check if swapping is necessary on BE */
- rptr =((u64*)&ring->adev->wb.wb[ring->rptr_offs]);
+ rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);
DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
return ((*rptr) >> 2);
@@ -231,19 +229,20 @@ static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- u64* wptr = NULL;
- uint64_t local_wptr=0;
+ u64 *wptr = NULL;
+ uint64_t local_wptr = 0;
if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */
- wptr = ((u64*)&adev->wb.wb[ring->wptr_offs]);
+ wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]);
DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr);
*wptr = (*wptr) >> 2;
DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr);
} else {
u32 lowbit, highbit;
int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- wptr=&local_wptr;
+
+ wptr = &local_wptr;
lowbit = RREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR)) >> 2;
highbit = RREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
@@ -285,12 +284,13 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
+
DRM_DEBUG("Not using doorbell -- "
"mmSDMA%i_GFX_RB_WPTR == 0x%08x "
- "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x \n",
- me,
+ "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
me,
lower_32_bits(ring->wptr << 2),
+ me,
upper_32_bits(ring->wptr << 2));
WREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
WREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
@@ -319,22 +319,22 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (VEGA10).
*/
static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch)
{
- u32 vmid = vm_id & 0xf;
+ u32 vmid = vm_id & 0xf;
- /* IB packet must end on a 8 DW boundary */
- sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ /* IB packet must end on an 8 DW boundary */
+ sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
- SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
- /* base must be 32 byte aligned */
- amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
- amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
- amdgpu_ring_write(ring, ib->length_dw);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
+ SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
+ /* base must be 32 byte aligned */
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, ib->length_dw);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0);
}
@@ -523,7 +523,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
u32 doorbell;
u32 doorbell_offset;
u32 temp;
- int i,r;
+ int i, r;
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
@@ -572,7 +572,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
doorbell = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL));
doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL_OFFSET));
- if (ring->use_doorbell){
+ if (ring->use_doorbell) {
doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
OFFSET, ring->doorbell_index);
@@ -694,9 +694,7 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
for (j = 0; j < fw_size; j++)
- {
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
- }
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
}
@@ -744,10 +742,8 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
if (r)
return r;
r = sdma_v4_0_rlc_resume(adev);
- if (r)
- return r;
- return 0;
+ return r;
}
/**
@@ -797,9 +793,8 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
for (i = 0; i < adev->usec_timeout; i++) {
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
+ if (tmp == 0xDEADBEEF)
break;
- }
DRM_UDELAY(1);
}
@@ -864,29 +859,29 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
if (r)
goto err1;
- r = dma_fence_wait_timeout(f, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
- r = -ETIMEDOUT;
- goto err1;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- goto err1;
- }
- tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
- r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
- r = -EINVAL;
- }
+ r = dma_fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out\n");
+ r = -ETIMEDOUT;
+ goto err1;
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+ goto err1;
+ }
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
+ } else {
+ DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ r = -EINVAL;
+ }
err1:
- amdgpu_ib_free(adev, &ib, NULL);
- dma_fence_put(f);
+ amdgpu_ib_free(adev, &ib, NULL);
+ dma_fence_put(f);
err0:
- amdgpu_wb_free(adev, index);
- return r;
+ amdgpu_wb_free(adev, index);
+ return r;
}
@@ -1039,44 +1034,40 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
- unsigned eng = ring->idx;
- unsigned i;
+ unsigned eng = ring->vm_inv_eng;
pd_addr = pd_addr | 0x1; /* valid bit */
/* now only use physical base address of PDE and valid */
BUG_ON(pd_addr & 0xFFFF00000000003EULL);
- for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
- SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vm_id * 2);
- amdgpu_ring_write(ring, lower_32_bits(pd_addr));
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
- SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vm_id * 2);
- amdgpu_ring_write(ring, upper_32_bits(pd_addr));
-
- /* flush TLB */
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
- SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- amdgpu_ring_write(ring, hub->vm_inv_eng0_req + eng);
- amdgpu_ring_write(ring, req);
-
- /* wait for flush */
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
- SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
- SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
- amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, 1 << vm_id); /* reference */
- amdgpu_ring_write(ring, 1 << vm_id); /* mask */
- amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
- SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
- }
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+ amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vm_id * 2);
+ amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+ amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vm_id * 2);
+ amdgpu_ring_write(ring, upper_32_bits(pd_addr));
+
+ /* flush TLB */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+ amdgpu_ring_write(ring, hub->vm_inv_eng0_req + eng);
+ amdgpu_ring_write(ring, req);
+
+ /* wait for flush */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
+ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
+ amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 1 << vm_id); /* reference */
+ amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
}
static int sdma_v4_0_early_init(void *handle)
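
Dropping the loop over AMDGPU_MAX_VMHUBS halves the ring space a flush
consumes, which is why .emit_frame_size later in this file shrinks from 36
to 18 DWs. The arithmetic, as a comment-style breakdown (assuming the two
hubs Vega10 exposes):

	/* old: 18 DW reserved per hub * AMDGPU_MAX_VMHUBS (2) = 36 DW
	 * new: one hub per ring via ring->funcs->vmhub        = 18 DW
	 * The per-hub cost covers the three SDMA_OP_SRBM_WRITE packets
	 * and the SDMA_OP_POLL_REGMEM packet emitted above.
	 */
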
@@ -1162,8 +1153,6 @@ static int sdma_v4_0_hw_init(void *handle)
sdma_v4_0_init_golden_registers(adev);
r = sdma_v4_0_start(adev);
- if (r)
- return r;
return r;
}
@@ -1199,10 +1188,12 @@ static bool sdma_v4_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 i;
+
for (i = 0; i < adev->sdma.num_instances; i++) {
u32 tmp = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_STATUS_REG));
+
if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
- return false;
+ return false;
}
return true;
@@ -1211,8 +1202,9 @@ static bool sdma_v4_0_is_idle(void *handle)
static int sdma_v4_0_wait_for_idle(void *handle)
{
unsigned i;
- u32 sdma0,sdma1;
+ u32 sdma0, sdma1;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
for (i = 0; i < adev->usec_timeout; i++) {
sdma0 = RREG32(sdma_v4_0_get_reg_offset(0, mmSDMA0_STATUS_REG));
sdma1 = RREG32(sdma_v4_0_get_reg_offset(1, mmSDMA0_STATUS_REG));
@@ -1240,7 +1232,7 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
u32 reg_offset = (type == AMDGPU_SDMA_IRQ_TRAP0) ?
sdma_v4_0_get_reg_offset(0, mmSDMA0_CNTL) :
- sdma_v4_0_get_reg_offset(1, mmSDMA0_CNTL);
+ sdma_v4_0_get_reg_offset(1, mmSDMA0_CNTL);
sdma_cntl = RREG32(reg_offset);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
@@ -1332,7 +1324,7 @@ static void sdma_v4_0_update_medium_grain_clock_gating(
SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);
- if(def != data)
+ if (def != data)
WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), data);
}
} else {
@@ -1382,17 +1374,17 @@ static void sdma_v4_0_update_medium_grain_light_sleep(
/* 1-not override: enable sdma1 mem light sleep */
if (adev->asic_type == CHIP_VEGA10) {
- def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
- data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
- if (def != data)
- WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), data);
+ def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
+ data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
+ if (def != data)
+ WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), data);
}
} else {
/* 0-override: disable sdma0 mem light sleep */
def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (def != data)
- WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
+ WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
/* 0-override: disable sdma1 mem light sleep */
if (adev->asic_type == CHIP_VEGA10) {
@@ -1473,6 +1465,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
+ .vmhub = AMDGPU_MMHUB,
.get_rptr = sdma_v4_0_ring_get_rptr,
.get_wptr = sdma_v4_0_ring_get_wptr,
.set_wptr = sdma_v4_0_ring_set_wptr,
@@ -1480,7 +1473,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
6 + /* sdma_v4_0_ring_emit_hdp_flush */
3 + /* sdma_v4_0_ring_emit_hdp_invalidate */
6 + /* sdma_v4_0_ring_emit_pipeline_sync */
- 36 + /* sdma_v4_0_ring_emit_vm_flush */
+ 18 + /* sdma_v4_0_ring_emit_vm_flush */
10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
.emit_ib = sdma_v4_0_ring_emit_ib,
@@ -1606,8 +1599,7 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
}
}
-const struct amdgpu_ip_block_version sdma_v4_0_ip_block =
-{
+const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 4,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 385de86..6b55d45 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -25,7 +25,7 @@
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
-#include "amdgpu_atombios.h"
+#include "amdgpu_atomfirmware.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
@@ -405,11 +405,11 @@ static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
static int soc15_asic_reset(struct amdgpu_device *adev)
{
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+ amdgpu_atomfirmware_scratch_regs_engine_hung(adev, true);
soc15_gpu_pci_config_reset(adev);
- amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+ amdgpu_atomfirmware_scratch_regs_engine_hung(adev, false);
return 0;
}
@@ -505,8 +505,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
- if (!amdgpu_sriov_vf(adev))
- amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
+ amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 2b96c80..e8df6d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -45,13 +45,31 @@ struct nbio_pcie_index_data {
u32 index_offset;
u32 data_offset;
};
-// Register Access Macro
+
+/* Register Access Macros */
#define SOC15_REG_OFFSET(ip, inst, reg) (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
(1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
(2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
(3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
(ip##_BASE__INST##inst##_SEG4 + reg)))))
+#define WREG32_FIELD15(ip, idx, reg, field, val) \
+ WREG32(SOC15_REG_OFFSET(ip, idx, mm##reg), (RREG32(SOC15_REG_OFFSET(ip, idx, mm##reg)) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+
+#define RREG32_SOC15(ip, inst, reg) \
+ RREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
+ (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
+ (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
+ (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
+ (ip##_BASE__INST##inst##_SEG4 + reg))))))
+
+#define WREG32_SOC15(ip, inst, reg, value) \
+ WREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
+ (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
+ (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
+ (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
+ (ip##_BASE__INST##inst##_SEG4 + reg))))), value)
+
#endif
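
The new helpers fold the segment-base lookup into the access itself, which
is what the gfx_v9_0 conversions earlier in this patch rely on. A
before/after sketch of the same register update:

	/* before: offset computed explicitly at every call site */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), tmp);

	/* after: the macro resolves the per-IP, per-instance base itself */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
	WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, tmp);
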
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 9bcf014..eca8f6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -27,10 +27,14 @@
#include "amdgpu_uvd.h"
#include "soc15d.h"
#include "soc15_common.h"
+#include "mmsch_v1_0.h"
#include "vega10/soc15ip.h"
#include "vega10/UVD/uvd_7_0_offset.h"
#include "vega10/UVD/uvd_7_0_sh_mask.h"
+#include "vega10/VCE/vce_4_0_offset.h"
+#include "vega10/VCE/vce_4_0_default.h"
+#include "vega10/VCE/vce_4_0_sh_mask.h"
#include "vega10/NBIF/nbif_6_1_offset.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "vega10/MMHUB/mmhub_1_0_offset.h"
@@ -41,6 +45,7 @@ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v7_0_start(struct amdgpu_device *adev);
static void uvd_v7_0_stop(struct amdgpu_device *adev);
+static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
/**
* uvd_v7_0_ring_get_rptr - get read pointer
@@ -98,6 +103,9 @@ static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
+ if (ring->use_doorbell)
+ return adev->wb.wb[ring->wptr_offs];
+
if (ring == &adev->uvd.ring_enc[0])
return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR));
else
@@ -129,6 +137,13 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
+ if (ring->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
+ WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
+ return;
+ }
+
if (ring == &adev->uvd.ring_enc[0])
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_WPTR),
lower_32_bits(ring->wptr));
@@ -353,7 +368,10 @@ static int uvd_v7_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->uvd.num_enc_rings = 2;
+ if (amdgpu_sriov_vf(adev))
+ adev->uvd.num_enc_rings = 1;
+ else
+ adev->uvd.num_enc_rings = 2;
uvd_v7_0_set_ring_funcs(adev);
uvd_v7_0_set_enc_ring_funcs(adev);
uvd_v7_0_set_irq_funcs(adev);
@@ -406,21 +424,31 @@ static int uvd_v7_0_sw_init(void *handle)
r = amdgpu_uvd_resume(adev);
if (r)
return r;
+ if (!amdgpu_sriov_vf(adev)) {
+ ring = &adev->uvd.ring;
+ sprintf(ring->name, "uvd");
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
+ if (r)
+ return r;
+ }
- ring = &adev->uvd.ring;
- sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
- if (r)
- return r;
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.ring_enc[i];
sprintf(ring->name, "uvd_enc%d", i);
+ if (amdgpu_sriov_vf(adev)) {
+ ring->use_doorbell = true;
+ ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
+ }
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
if (r)
return r;
}
+ r = amdgpu_virt_alloc_mm_table(adev);
+ if (r)
+ return r;
+
return r;
}
@@ -429,6 +457,8 @@ static int uvd_v7_0_sw_fini(void *handle)
int i, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_virt_free_mm_table(adev);
+
r = amdgpu_uvd_suspend(adev);
if (r)
return r;
@@ -455,48 +485,53 @@ static int uvd_v7_0_hw_init(void *handle)
uint32_t tmp;
int i, r;
- r = uvd_v7_0_start(adev);
+ if (amdgpu_sriov_vf(adev))
+ r = uvd_v7_0_sriov_start(adev);
+ else
+ r = uvd_v7_0_start(adev);
if (r)
goto done;
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
- goto done;
- }
+ if (!amdgpu_sriov_vf(adev)) {
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+ ring->ready = false;
+ goto done;
+ }
- r = amdgpu_ring_alloc(ring, 10);
- if (r) {
- DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
- goto done;
- }
+ r = amdgpu_ring_alloc(ring, 10);
+ if (r) {
+ DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
+ goto done;
+ }
- tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
- mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
- amdgpu_ring_write(ring, tmp);
- amdgpu_ring_write(ring, 0xFFFFF);
+ tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
+ mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
+ amdgpu_ring_write(ring, tmp);
+ amdgpu_ring_write(ring, 0xFFFFF);
- tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
- mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
- amdgpu_ring_write(ring, tmp);
- amdgpu_ring_write(ring, 0xFFFFF);
+ tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
+ mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
+ amdgpu_ring_write(ring, tmp);
+ amdgpu_ring_write(ring, 0xFFFFF);
- tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
- mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
- amdgpu_ring_write(ring, tmp);
- amdgpu_ring_write(ring, 0xFFFFF);
+ tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0,
+ mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
+ amdgpu_ring_write(ring, tmp);
+ amdgpu_ring_write(ring, 0xFFFFF);
- /* Clear timeout status bits */
- amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
- mmUVD_SEMA_TIMEOUT_STATUS), 0));
- amdgpu_ring_write(ring, 0x8);
+ /* Clear timeout status bits */
+ amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
+ mmUVD_SEMA_TIMEOUT_STATUS), 0));
+ amdgpu_ring_write(ring, 0x8);
- amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
- mmUVD_SEMA_CNTL), 0));
- amdgpu_ring_write(ring, 3);
+ amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
+ mmUVD_SEMA_CNTL), 0));
+ amdgpu_ring_write(ring, 3);
- amdgpu_ring_commit(ring);
+ amdgpu_ring_commit(ring);
+ }
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.ring_enc[i];
@@ -618,6 +653,241 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
}
+static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
+ struct amdgpu_mm_table *table)
+{
+ uint32_t data = 0, loop;
+ uint64_t addr = table->gpu_addr;
+ struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
+ uint32_t size;
+
+ size = header->header_size + header->vce_table_size + header->uvd_table_size;
+
+ /* 1, write the GPU MC address of the memory descriptor to VCE_MMSCH_VF_CTX_ADDR_LO/HI */
+ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO), lower_32_bits(addr));
+ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI), upper_32_bits(addr));
+
+ /* 2, update vmid of descriptor */
+ data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID));
+ data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
+ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID), data);
+
+ /* 3, notify mmsch about the size of this descriptor */
+ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE), size);
+
+ /* 4, set resp to zero */
+ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0);
+
+ /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
+ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001);
+
+ data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
+ loop = 1000;
+ while ((data & 0x10000002) != 0x10000002) {
+ udelay(10);
+ data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
+ loop--;
+ if (!loop)
+ break;
+ }
+
+ if (!loop) {
+ dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ uint32_t offset, size, tmp;
+ uint32_t table_size = 0;
+ struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
+ struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
+ struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
+ struct mmsch_v1_0_cmd_end end = { {0} };
+ uint32_t *init_table = adev->virt.mm_table.cpu_addr;
+ struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
+
+ direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
+ direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+ direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
+ end.cmd_header.command_type = MMSCH_COMMAND__END;
+
+ if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
+ header->version = MMSCH_VERSION;
+ header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;
+
+ if (header->vce_table_offset == 0 && header->vce_table_size == 0)
+ header->uvd_table_offset = header->header_size;
+ else
+ header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;
+
+ init_table += header->uvd_table_offset;
+
+ ring = &adev->uvd.ring;
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
+
+ /* disable clock gating */
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+ ~UVD_POWER_STATUS__UVD_PG_MODE_MASK, 0);
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS),
+ 0xFFFFFFFF, 0x00000004);
+ /* mc resume */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+ offset = 0;
+ } else {
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->uvd.gpu_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->uvd.gpu_addr));
+ offset = size;
+ }
+
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size);
+
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(adev->uvd.gpu_addr + offset));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(adev->uvd.gpu_addr + offset));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
+
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2),
+ AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
+
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_ADDR_CONFIG),
+ adev->gfx.config.gb_addr_config);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG),
+ adev->gfx.config.gb_addr_config);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG),
+ adev->gfx.config.gb_addr_config);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
+ /* mc resume end */
+
+ /* disable clock gating */
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL),
+ ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);
+
+ /* disable interrupt */
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
+ ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);
+
+ /* stall UMC and register bus before resetting VCPU */
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
+ UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ /* put LMI, VCPU, RBC etc... into reset */
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
+ (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));
+
+ /* initialize UVD memory controller */
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL),
+ (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__REQ_MODE_MASK |
+ 0x00100000L));
+
+ /* disable byte swapping */
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_SWAP_CNTL), 0);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MP_SWAP_CNTL), 0);
+
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA0), 0x40c2040);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA1), 0x0);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB0), 0x40c2040);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB1), 0x0);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_ALU), 0);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUX), 0x88);
+
+ /* take all subblocks out of reset, except VCPU */
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
+ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+
+ /* enable VCPU clock */
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* enable UMC */
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
+
+ /* boot up the VCPU */
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
+
+ MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02);
+
+ /* enable master interrupt */
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
+ ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
+ (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
+
+ /* clear bit 4 of UVD_STATUS */
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS),
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);
+
+ /* force RBC into idle state */
+ size = order_base_2(ring->ring_size);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);
+
+ /* set the write pointer delay */
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL), 0);
+
+ /* set the wb address */
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR),
+ (upper_32_bits(ring->gpu_addr) >> 2));
+
+ /* program the RB_BASE for the ring buffer */
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
+ lower_32_bits(ring->gpu_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
+ upper_32_bits(ring->gpu_addr));
+
+ ring->wptr = 0;
+ ring = &adev->uvd.ring_enc[0];
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4);
+
+ /* add end packet */
+ memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
+ table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
+ header->uvd_table_size = table_size;
+
+ return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
+ }
+ return -EINVAL; /* already initialized? */
+}
+
/**
* uvd_v7_0_start - start UVD block
*
@@ -1034,42 +1304,38 @@ static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
uint32_t data0, data1, mask;
- unsigned eng = ring->idx;
- unsigned i;
+ unsigned eng = ring->vm_inv_eng;
pd_addr = pd_addr | 0x1; /* valid bit */
/* now only use physical base address of PDE and valid */
BUG_ON(pd_addr & 0xFFFF00000000003EULL);
- for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
-
- data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
- data1 = upper_32_bits(pd_addr);
- uvd_v7_0_vm_reg_write(ring, data0, data1);
-
- data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
- data1 = lower_32_bits(pd_addr);
- uvd_v7_0_vm_reg_write(ring, data0, data1);
-
- data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
- data1 = lower_32_bits(pd_addr);
- mask = 0xffffffff;
- uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
-
- /* flush TLB */
- data0 = (hub->vm_inv_eng0_req + eng) << 2;
- data1 = req;
- uvd_v7_0_vm_reg_write(ring, data0, data1);
-
- /* wait for flush */
- data0 = (hub->vm_inv_eng0_ack + eng) << 2;
- data1 = 1 << vm_id;
- mask = 1 << vm_id;
- uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
- }
+ data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
+ data1 = upper_32_bits(pd_addr);
+ uvd_v7_0_vm_reg_write(ring, data0, data1);
+
+ data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
+ data1 = lower_32_bits(pd_addr);
+ uvd_v7_0_vm_reg_write(ring, data0, data1);
+
+ data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
+ data1 = lower_32_bits(pd_addr);
+ mask = 0xffffffff;
+ uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
+
+ /* flush TLB */
+ data0 = (hub->vm_inv_eng0_req + eng) << 2;
+ data1 = req;
+ uvd_v7_0_vm_reg_write(ring, data0, data1);
+
+ /* wait for flush */
+ data0 = (hub->vm_inv_eng0_ack + eng) << 2;
+ data1 = 1 << vm_id;
+ mask = 1 << vm_id;
+ uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
}
static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
@@ -1080,44 +1346,37 @@ static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vm_id, uint64_t pd_addr)
{
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
- unsigned eng = ring->idx;
- unsigned i;
+ unsigned eng = ring->vm_inv_eng;
pd_addr = pd_addr | 0x1; /* valid bit */
/* now only use physical base address of PDE and valid */
BUG_ON(pd_addr & 0xFFFF00000000003EULL);
- for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
-
- amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
- amdgpu_ring_write(ring,
- (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
- amdgpu_ring_write(ring, upper_32_bits(pd_addr));
-
- amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
- amdgpu_ring_write(ring,
- (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
- amdgpu_ring_write(ring, lower_32_bits(pd_addr));
-
- amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
- amdgpu_ring_write(ring,
- (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
- amdgpu_ring_write(ring, 0xffffffff);
- amdgpu_ring_write(ring, lower_32_bits(pd_addr));
-
- /* flush TLB */
- amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
- amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
- amdgpu_ring_write(ring, req);
-
- /* wait for flush */
- amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
- amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
- amdgpu_ring_write(ring, 1 << vm_id);
- amdgpu_ring_write(ring, 1 << vm_id);
- }
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
+ amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
+ amdgpu_ring_write(ring, upper_32_bits(pd_addr));
+
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
+ amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+ amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
+ amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+ amdgpu_ring_write(ring, 0xffffffff);
+ amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+
+ /* flush TLB */
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
+ amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
+ amdgpu_ring_write(ring, req);
+
+ /* wait for flush */
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
+ amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
+ amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_ring_write(ring, 1 << vm_id);
}
#if 0
@@ -1240,7 +1499,8 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
amdgpu_fence_process(&adev->uvd.ring_enc[0]);
break;
case 120:
- amdgpu_fence_process(&adev->uvd.ring_enc[1]);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_fence_process(&adev->uvd.ring_enc[1]);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
@@ -1448,13 +1708,14 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
.align_mask = 0xf,
.nop = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0),
.support_64bit_ptrs = false,
+ .vmhub = AMDGPU_MMHUB,
.get_rptr = uvd_v7_0_ring_get_rptr,
.get_wptr = uvd_v7_0_ring_get_wptr,
.set_wptr = uvd_v7_0_ring_set_wptr,
.emit_frame_size =
2 + /* uvd_v7_0_ring_emit_hdp_flush */
2 + /* uvd_v7_0_ring_emit_hdp_invalidate */
- 34 * AMDGPU_MAX_VMHUBS + /* uvd_v7_0_ring_emit_vm_flush */
+ 34 + /* uvd_v7_0_ring_emit_vm_flush */
14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
.emit_ib = uvd_v7_0_ring_emit_ib,
@@ -1475,11 +1736,12 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
.align_mask = 0x3f,
.nop = HEVC_ENC_CMD_NO_OP,
.support_64bit_ptrs = false,
+ .vmhub = AMDGPU_MMHUB,
.get_rptr = uvd_v7_0_enc_ring_get_rptr,
.get_wptr = uvd_v7_0_enc_ring_get_wptr,
.set_wptr = uvd_v7_0_enc_ring_set_wptr,
.emit_frame_size =
- 17 * AMDGPU_MAX_VMHUBS + /* uvd_v7_0_enc_ring_emit_vm_flush */
+ 17 + /* uvd_v7_0_enc_ring_emit_vm_flush */
5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
1, /* uvd_v7_0_enc_ring_insert_end */
.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index fb08193..90332f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -77,13 +77,26 @@ static int vce_v3_0_set_clockgating_state(void *handle,
static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
+ u32 v;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ if (adev->vce.harvest_config == 0 ||
+ adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+ else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
if (ring == &adev->vce.ring[0])
- return RREG32(mmVCE_RB_RPTR);
+ v = RREG32(mmVCE_RB_RPTR);
else if (ring == &adev->vce.ring[1])
- return RREG32(mmVCE_RB_RPTR2);
+ v = RREG32(mmVCE_RB_RPTR2);
else
- return RREG32(mmVCE_RB_RPTR3);
+ v = RREG32(mmVCE_RB_RPTR3);
+
+ WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return v;
}
/**
@@ -96,13 +109,26 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
+ u32 v;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ if (adev->vce.harvest_config == 0 ||
+ adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+ else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
if (ring == &adev->vce.ring[0])
- return RREG32(mmVCE_RB_WPTR);
+ v = RREG32(mmVCE_RB_WPTR);
else if (ring == &adev->vce.ring[1])
- return RREG32(mmVCE_RB_WPTR2);
+ v = RREG32(mmVCE_RB_WPTR2);
else
- return RREG32(mmVCE_RB_WPTR3);
+ v = RREG32(mmVCE_RB_WPTR3);
+
+ WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return v;
}
/**
@@ -116,12 +142,22 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
+ mutex_lock(&adev->grbm_idx_mutex);
+ if (adev->vce.harvest_config == 0 ||
+ adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+ else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
+
if (ring == &adev->vce.ring[0])
WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
else if (ring == &adev->vce.ring[1])
WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
else
WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+
+ WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+ mutex_unlock(&adev->grbm_idx_mutex);
}
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
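
All three accessors above repeat one discipline: take grbm_idx_mutex, point GRBM_GFX_INDEX at the surviving VCE instance, touch the banked ring registers, then restore the default index before dropping the lock. A user-space model of that select/access/restore pattern (types, constants, and the register itself are stand-ins, not the driver's API):

#include <pthread.h>
#include <stdint.h>

#define HARVEST_VCE0 1	/* assumed encoding: instance 0 fused off */

static pthread_mutex_t grbm_idx_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t grbm_gfx_index;	/* stands in for the banked-index register */

static uint32_t read_rptr(void)
{
	return 0x42;		/* stub for the MMIO read of VCE_RB_RPTR */
}

static uint32_t banked_read(unsigned harvest, uint32_t (*rd)(void))
{
	uint32_t v;

	pthread_mutex_lock(&grbm_idx_lock);
	/* route to instance 0 unless instance 0 is the harvested one */
	grbm_gfx_index = (harvest == HARVEST_VCE0) ? 1 : 0;
	v = rd();			/* banked register access */
	grbm_gfx_index = 0;		/* restore the default selection */
	pthread_mutex_unlock(&grbm_idx_lock);
	return v;
}

int main(void)
{
	return banked_read(0, read_rptr) == 0x42 ? 0 : 1;
}
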
@@ -231,33 +267,38 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
int idx, r;
- ring = &adev->vce.ring[0];
- WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
- WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
- WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
- WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
-
- ring = &adev->vce.ring[1];
- WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
- WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
- WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
- WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
- WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
-
- ring = &adev->vce.ring[2];
- WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
- WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
- WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
- WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
- WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
-
mutex_lock(&adev->grbm_idx_mutex);
for (idx = 0; idx < 2; ++idx) {
if (adev->vce.harvest_config & (1 << idx))
continue;
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
+
+ /* Program the ring registers through instance 0's register space
+  * whenever instance 0 is present (alone or with instance 1); use
+  * instance 1's register space only when instance 1 is the sole
+  * surviving instance.
+  */
+ if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
+ ring = &adev->vce.ring[0];
+ WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
+ WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+ ring = &adev->vce.ring[1];
+ WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
+ WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+
+ ring = &adev->vce.ring[2];
+ WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
+ WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
+ }
+
vce_v3_0_mc_resume(adev, idx);
WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index edde5fe..139f964 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -49,63 +49,6 @@ static void vce_v4_0_mc_resume(struct amdgpu_device *adev);
static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev);
-static inline void mmsch_insert_direct_wt(struct mmsch_v1_0_cmd_direct_write *direct_wt,
- uint32_t *init_table,
- uint32_t reg_offset,
- uint32_t value)
-{
- direct_wt->cmd_header.reg_offset = reg_offset;
- direct_wt->reg_value = value;
- memcpy((void *)init_table, direct_wt, sizeof(struct mmsch_v1_0_cmd_direct_write));
-}
-
-static inline void mmsch_insert_direct_rd_mod_wt(struct mmsch_v1_0_cmd_direct_read_modify_write *direct_rd_mod_wt,
- uint32_t *init_table,
- uint32_t reg_offset,
- uint32_t mask, uint32_t data)
-{
- direct_rd_mod_wt->cmd_header.reg_offset = reg_offset;
- direct_rd_mod_wt->mask_value = mask;
- direct_rd_mod_wt->write_data = data;
- memcpy((void *)init_table, direct_rd_mod_wt,
- sizeof(struct mmsch_v1_0_cmd_direct_read_modify_write));
-}
-
-static inline void mmsch_insert_direct_poll(struct mmsch_v1_0_cmd_direct_polling *direct_poll,
- uint32_t *init_table,
- uint32_t reg_offset,
- uint32_t mask, uint32_t wait)
-{
- direct_poll->cmd_header.reg_offset = reg_offset;
- direct_poll->mask_value = mask;
- direct_poll->wait_value = wait;
- memcpy((void *)init_table, direct_poll, sizeof(struct mmsch_v1_0_cmd_direct_polling));
-}
-
-#define INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
- mmsch_insert_direct_rd_mod_wt(&direct_rd_mod_wt, \
- init_table, (reg), \
- (mask), (data)); \
- init_table += sizeof(struct mmsch_v1_0_cmd_direct_read_modify_write)/4; \
- table_size += sizeof(struct mmsch_v1_0_cmd_direct_read_modify_write)/4; \
-}
-
-#define INSERT_DIRECT_WT(reg, value) { \
- mmsch_insert_direct_wt(&direct_wt, \
- init_table, (reg), \
- (value)); \
- init_table += sizeof(struct mmsch_v1_0_cmd_direct_write)/4; \
- table_size += sizeof(struct mmsch_v1_0_cmd_direct_write)/4; \
-}
-
-#define INSERT_DIRECT_POLL(reg, mask, wait) { \
- mmsch_insert_direct_poll(&direct_poll, \
- init_table, (reg), \
- (mask), (wait)); \
- init_table += sizeof(struct mmsch_v1_0_cmd_direct_polling)/4; \
- table_size += sizeof(struct mmsch_v1_0_cmd_direct_polling)/4; \
-}
-
/**
* vce_v4_0_ring_get_rptr - get read pointer
*
@@ -280,60 +223,73 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
init_table += header->vce_table_offset;
ring = &adev->vce.ring[0];
- INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR), ring->wptr);
- INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR), ring->wptr);
- INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO), lower_32_bits(ring->gpu_addr));
- INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
- INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE), ring->ring_size / 4);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO),
+ lower_32_bits(ring->gpu_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI),
+ upper_32_bits(ring->gpu_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE),
+ ring->ring_size / 4);
/* BEGINNING OF MC_RESUME */
- INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), ~(1 << 16), 0);
- INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), ~0xFF9FF000, 0x1FF000);
- INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), ~0x3F, 0x3F);
- INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), 0x1FF);
-
- INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL), 0x398000);
- INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CACHE_CTRL), ~0x1, 0);
- INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL), 0);
- INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
- INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
-
- INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0), adev->vce.gpu_addr >> 8);
- INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1), adev->vce.gpu_addr >> 8);
- INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2), adev->vce.gpu_addr >> 8);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL), 0x398000);
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CACHE_CTRL), ~0x1, 0);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL), 0);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
+ } else {
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
+ adev->vce.gpu_addr >> 8);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
+ adev->vce.gpu_addr >> 8);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
+ adev->vce.gpu_addr >> 8);
+ }
offset = AMDGPU_VCE_FIRMWARE_OFFSET;
size = VCE_V4_0_FW_SIZE;
- INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), offset & 0x7FFFFFFF);
- INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
+ offset & 0x7FFFFFFF);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
offset += size;
size = VCE_V4_0_STACK_SIZE;
- INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1), offset & 0x7FFFFFFF);
- INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1),
+ offset & 0x7FFFFFFF);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);
offset += size;
size = VCE_V4_0_DATA_SIZE;
- INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2), offset & 0x7FFFFFFF);
- INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2),
+ offset & 0x7FFFFFFF);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);
- INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0);
- INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
- 0xffffffff, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0);
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
+ 0xffffffff, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
/* end of MC_RESUME */
- INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL),
- ~0x200001, VCE_VCPU_CNTL__CLK_EN_MASK);
- INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, 0);
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
+ VCE_STATUS__JOB_BUSY_MASK, ~VCE_STATUS__JOB_BUSY_MASK);
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL),
+ ~0x200001, VCE_VCPU_CNTL__CLK_EN_MASK);
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, 0);
- INSERT_DIRECT_POLL(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
- VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK,
- VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK);
+ MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
+ VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK,
+ VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK);
/* clear BUSY flag */
- INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
- ~VCE_STATUS__JOB_BUSY_MASK, 0);
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
+ ~VCE_STATUS__JOB_BUSY_MASK, 0);
/* add end packet */
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
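
Each MMSCH_V1_0_INSERT_* macro above appends one command record to a flat dword table and advances both the write cursor and the running size; the MMSCH firmware later replays that table on behalf of the VF. A sketch of the append step (the record layout here is a hypothetical stand-in, not the one in mmsch_v1_0.h):

#include <stdint.h>
#include <string.h>

struct cmd_direct_write {	/* hypothetical two-dword record */
	uint32_t reg_offset;
	uint32_t reg_value;
};

static uint32_t init_table[256];
static uint32_t table_size;	/* running size in dwords */

static void insert_direct_wt(uint32_t reg, uint32_t value)
{
	struct cmd_direct_write cmd = {
		.reg_offset = reg,
		.reg_value = value,
	};

	/* append the record, then advance cursor and size together */
	memcpy(&init_table[table_size], &cmd, sizeof(cmd));
	table_size += sizeof(cmd) / 4;
}

int main(void)
{
	insert_direct_wt(0x1000, 0x398000);	/* e.g. an LMI_CTRL write */
	return (int)table_size - 2;		/* 0: two dwords appended */
}
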
@@ -494,20 +450,9 @@ static int vce_v4_0_sw_init(void *handle)
return r;
}
- if (amdgpu_sriov_vf(adev)) {
- r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->virt.mm_table.bo,
- &adev->virt.mm_table.gpu_addr,
- (void *)&adev->virt.mm_table.cpu_addr);
- if (!r) {
- memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
- printk("mm table gpu addr = 0x%llx, cpu addr = %p. \n",
- adev->virt.mm_table.gpu_addr,
- adev->virt.mm_table.cpu_addr);
- }
+ r = amdgpu_virt_alloc_mm_table(adev);
+ if (r)
return r;
- }
return r;
}
@@ -518,10 +463,7 @@ static int vce_v4_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* free MM table */
- if (amdgpu_sriov_vf(adev))
- amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
- &adev->virt.mm_table.gpu_addr,
- (void *)&adev->virt.mm_table.cpu_addr);
+ amdgpu_virt_free_mm_table(adev);
r = amdgpu_vce_suspend(adev);
if (r)
@@ -973,44 +915,37 @@ static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vm_id, uint64_t pd_addr)
{
+ struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
- unsigned eng = ring->idx;
- unsigned i;
+ unsigned eng = ring->vm_inv_eng;
pd_addr = pd_addr | 0x1; /* valid bit */
/* now only use physical base address of PDE and valid */
BUG_ON(pd_addr & 0xFFFF00000000003EULL);
- for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
- struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
-
- amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
- amdgpu_ring_write(ring,
- (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
- amdgpu_ring_write(ring, upper_32_bits(pd_addr));
-
- amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
- amdgpu_ring_write(ring,
- (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
- amdgpu_ring_write(ring, lower_32_bits(pd_addr));
-
- amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
- amdgpu_ring_write(ring,
- (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
- amdgpu_ring_write(ring, 0xffffffff);
- amdgpu_ring_write(ring, lower_32_bits(pd_addr));
-
- /* flush TLB */
- amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
- amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
- amdgpu_ring_write(ring, req);
-
- /* wait for flush */
- amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
- amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
- amdgpu_ring_write(ring, 1 << vm_id);
- amdgpu_ring_write(ring, 1 << vm_id);
- }
+ amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
+ amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
+ amdgpu_ring_write(ring, upper_32_bits(pd_addr));
+
+ amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
+ amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+ amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+
+ amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
+ amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+ amdgpu_ring_write(ring, 0xffffffff);
+ amdgpu_ring_write(ring, lower_32_bits(pd_addr));
+
+ /* flush TLB */
+ amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
+ amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
+ amdgpu_ring_write(ring, req);
+
+ /* wait for flush */
+ amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
+ amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
+ amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_ring_write(ring, 1 << vm_id);
}
static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -1078,12 +1013,13 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
.align_mask = 0x3f,
.nop = VCE_CMD_NO_OP,
.support_64bit_ptrs = false,
+ .vmhub = AMDGPU_MMHUB,
.get_rptr = vce_v4_0_ring_get_rptr,
.get_wptr = vce_v4_0_ring_get_wptr,
.set_wptr = vce_v4_0_ring_set_wptr,
.parse_cs = amdgpu_vce_ring_parse_cs_vm,
.emit_frame_size =
- 17 * AMDGPU_MAX_VMHUBS + /* vce_v4_0_emit_vm_flush */
+ 17 + /* vce_v4_0_emit_vm_flush */
5 + 5 + /* amdgpu_vce_ring_emit_fence x2 vm fence */
1, /* vce_v4_0_ring_insert_end */
.emit_ib_size = 5, /* vce_v4_0_ring_emit_ib */
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 2ccf44e..1d1ac1e 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -138,6 +138,12 @@ struct amd_pp_profile {
uint8_t down_hyst;
};
+enum amd_fan_ctrl_mode {
+ AMD_FAN_CTRL_NONE = 0,
+ AMD_FAN_CTRL_MANUAL = 1,
+ AMD_FAN_CTRL_AUTO = 2,
+};
+
/* CG flags */
#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0)
#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1)
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 17b9d41..0a94f74 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -54,20 +54,6 @@ enum cgs_ind_reg {
};
/**
- * enum cgs_clock - Clocks controlled by the SMU
- */
-enum cgs_clock {
- CGS_CLOCK__SCLK,
- CGS_CLOCK__MCLK,
- CGS_CLOCK__VCLK,
- CGS_CLOCK__DCLK,
- CGS_CLOCK__ECLK,
- CGS_CLOCK__ACLK,
- CGS_CLOCK__ICLK,
- /* ... */
-};
-
-/**
* enum cgs_engine - Engines that can be statically power-gated
*/
enum cgs_engine {
@@ -81,15 +67,6 @@ enum cgs_engine {
/* ... */
};
-/**
- * enum cgs_voltage_planes - Voltage planes for external camera HW
- */
-enum cgs_voltage_planes {
- CGS_VOLTAGE_PLANE__SENSOR0,
- CGS_VOLTAGE_PLANE__SENSOR1,
- /* ... */
-};
-
/*
* enum cgs_ucode_id - Firmware types for different IPs
*/
@@ -147,17 +124,6 @@ enum cgs_resource_type {
};
/**
- * struct cgs_clock_limits - Clock limits
- *
- * Clocks are specified in 10KHz units.
- */
-struct cgs_clock_limits {
- unsigned min; /**< Minimum supported frequency */
- unsigned max; /**< Maxumim supported frequency */
- unsigned sustainable; /**< Thermally sustainable frequency */
-};
-
-/**
* struct cgs_firmware_info - Firmware information
*/
struct cgs_firmware_info {
@@ -221,54 +187,6 @@ struct cgs_acpi_method_info {
};
/**
- * cgs_gpu_mem_info() - Return information about memory heaps
- * @cgs_device: opaque device handle
- * @type: memory type
- * @mc_start: Start MC address of the heap (output)
- * @mc_size: MC address space size (output)
- * @mem_size: maximum amount of memory available for allocation (output)
- *
- * This function returns information about memory heaps. The type
- * parameter is used to select the memory heap. The mc_start and
- * mc_size for GART heaps may be bigger than the memory available for
- * allocation.
- *
- * mc_start and mc_size are undefined for non-contiguous FB memory
- * types, since buffers allocated with these types may or may not be
- * GART mapped.
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_gpu_mem_info_t)(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
- uint64_t *mc_start, uint64_t *mc_size,
- uint64_t *mem_size);
-
-/**
- * cgs_gmap_kmem() - map kernel memory to GART aperture
- * @cgs_device: opaque device handle
- * @kmem: pointer to kernel memory
- * @size: size to map
- * @min_offset: minimum offset from start of GART aperture
- * @max_offset: maximum offset from start of GART aperture
- * @kmem_handle: kernel memory handle (output)
- * @mcaddr: MC address (output)
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_gmap_kmem_t)(struct cgs_device *cgs_device, void *kmem, uint64_t size,
- uint64_t min_offset, uint64_t max_offset,
- cgs_handle_t *kmem_handle, uint64_t *mcaddr);
-
-/**
- * cgs_gunmap_kmem() - unmap kernel memory
- * @cgs_device: opaque device handle
- * @kmem_handle: kernel memory handle returned by gmap_kmem
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_gunmap_kmem_t)(struct cgs_device *cgs_device, cgs_handle_t kmem_handle);
-
-/**
* cgs_alloc_gpu_mem() - Allocate GPU memory
* @cgs_device: opaque device handle
* @type: memory type
@@ -392,62 +310,6 @@ typedef void (*cgs_write_ind_register_t)(struct cgs_device *cgs_device, enum cgs
unsigned index, uint32_t value);
/**
- * cgs_read_pci_config_byte() - Read byte from PCI configuration space
- * @cgs_device: opaque device handle
- * @addr: address
- *
- * Return: Value read
- */
-typedef uint8_t (*cgs_read_pci_config_byte_t)(struct cgs_device *cgs_device, unsigned addr);
-
-/**
- * cgs_read_pci_config_word() - Read word from PCI configuration space
- * @cgs_device: opaque device handle
- * @addr: address, must be word-aligned
- *
- * Return: Value read
- */
-typedef uint16_t (*cgs_read_pci_config_word_t)(struct cgs_device *cgs_device, unsigned addr);
-
-/**
- * cgs_read_pci_config_dword() - Read dword from PCI configuration space
- * @cgs_device: opaque device handle
- * @addr: address, must be dword-aligned
- *
- * Return: Value read
- */
-typedef uint32_t (*cgs_read_pci_config_dword_t)(struct cgs_device *cgs_device,
- unsigned addr);
-
-/**
- * cgs_write_pci_config_byte() - Write byte to PCI configuration space
- * @cgs_device: opaque device handle
- * @addr: address
- * @value: value to write
- */
-typedef void (*cgs_write_pci_config_byte_t)(struct cgs_device *cgs_device, unsigned addr,
- uint8_t value);
-
-/**
- * cgs_write_pci_config_word() - Write byte to PCI configuration space
- * @cgs_device: opaque device handle
- * @addr: address, must be word-aligned
- * @value: value to write
- */
-typedef void (*cgs_write_pci_config_word_t)(struct cgs_device *cgs_device, unsigned addr,
- uint16_t value);
-
-/**
- * cgs_write_pci_config_dword() - Write byte to PCI configuration space
- * @cgs_device: opaque device handle
- * @addr: address, must be dword-aligned
- * @value: value to write
- */
-typedef void (*cgs_write_pci_config_dword_t)(struct cgs_device *cgs_device, unsigned addr,
- uint32_t value);
-
-
-/**
* cgs_get_pci_resource() - provide access to a device resource (PCI BAR)
* @cgs_device: opaque device handle
* @resource_type: Type of Resource (MMIO, IO, ROM, FB, DOORBELL)
@@ -501,87 +363,6 @@ typedef int (*cgs_atom_exec_cmd_table_t)(struct cgs_device *cgs_device,
unsigned table, void *args);
/**
- * cgs_create_pm_request() - Create a power management request
- * @cgs_device: opaque device handle
- * @request: handle of created PM request (output)
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_create_pm_request_t)(struct cgs_device *cgs_device, cgs_handle_t *request);
-
-/**
- * cgs_destroy_pm_request() - Destroy a power management request
- * @cgs_device: opaque device handle
- * @request: handle of created PM request
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_destroy_pm_request_t)(struct cgs_device *cgs_device, cgs_handle_t request);
-
-/**
- * cgs_set_pm_request() - Activate or deactiveate a PM request
- * @cgs_device: opaque device handle
- * @request: PM request handle
- * @active: 0 = deactivate, non-0 = activate
- *
- * While a PM request is active, its minimum clock requests are taken
- * into account as the requested engines are powered up. When the
- * request is inactive, the engines may be powered down and clocks may
- * be lower, depending on other PM requests by other driver
- * components.
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_set_pm_request_t)(struct cgs_device *cgs_device, cgs_handle_t request,
- int active);
-
-/**
- * cgs_pm_request_clock() - Request a minimum frequency for a specific clock
- * @cgs_device: opaque device handle
- * @request: PM request handle
- * @clock: which clock?
- * @freq: requested min. frequency in 10KHz units (0 to clear request)
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_pm_request_clock_t)(struct cgs_device *cgs_device, cgs_handle_t request,
- enum cgs_clock clock, unsigned freq);
-
-/**
- * cgs_pm_request_engine() - Request an engine to be powered up
- * @cgs_device: opaque device handle
- * @request: PM request handle
- * @engine: which engine?
- * @powered: 0 = powered down, non-0 = powered up
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_pm_request_engine_t)(struct cgs_device *cgs_device, cgs_handle_t request,
- enum cgs_engine engine, int powered);
-
-/**
- * cgs_pm_query_clock_limits() - Query clock frequency limits
- * @cgs_device: opaque device handle
- * @clock: which clock?
- * @limits: clock limits
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_pm_query_clock_limits_t)(struct cgs_device *cgs_device,
- enum cgs_clock clock,
- struct cgs_clock_limits *limits);
-
-/**
- * cgs_set_camera_voltages() - Apply specific voltages to PMIC voltage planes
- * @cgs_device: opaque device handle
- * @mask: bitmask of voltages to change (1<<CGS_VOLTAGE_PLANE__xyz|...)
- * @voltages: pointer to array of voltage values in 1mV units
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_set_camera_voltages_t)(struct cgs_device *cgs_device, uint32_t mask,
- const uint32_t *voltages);
-/**
* cgs_get_firmware_info - Get the firmware information from core driver
* @cgs_device: opaque device handle
* @type: the firmware type
@@ -627,9 +408,6 @@ typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en);
struct cgs_ops {
/* memory management calls (similar to KFD interface) */
- cgs_gpu_mem_info_t gpu_mem_info;
- cgs_gmap_kmem_t gmap_kmem;
- cgs_gunmap_kmem_t gunmap_kmem;
cgs_alloc_gpu_mem_t alloc_gpu_mem;
cgs_free_gpu_mem_t free_gpu_mem;
cgs_gmap_gpu_mem_t gmap_gpu_mem;
@@ -641,27 +419,12 @@ struct cgs_ops {
cgs_write_register_t write_register;
cgs_read_ind_register_t read_ind_register;
cgs_write_ind_register_t write_ind_register;
- /* PCI configuration space access */
- cgs_read_pci_config_byte_t read_pci_config_byte;
- cgs_read_pci_config_word_t read_pci_config_word;
- cgs_read_pci_config_dword_t read_pci_config_dword;
- cgs_write_pci_config_byte_t write_pci_config_byte;
- cgs_write_pci_config_word_t write_pci_config_word;
- cgs_write_pci_config_dword_t write_pci_config_dword;
/* PCI resources */
cgs_get_pci_resource_t get_pci_resource;
/* ATOM BIOS */
cgs_atom_get_data_table_t atom_get_data_table;
cgs_atom_get_cmd_table_revs_t atom_get_cmd_table_revs;
cgs_atom_exec_cmd_table_t atom_exec_cmd_table;
- /* Power management */
- cgs_create_pm_request_t create_pm_request;
- cgs_destroy_pm_request_t destroy_pm_request;
- cgs_set_pm_request_t set_pm_request;
- cgs_pm_request_clock_t pm_request_clock;
- cgs_pm_request_engine_t pm_request_engine;
- cgs_pm_query_clock_limits_t pm_query_clock_limits;
- cgs_set_camera_voltages_t set_camera_voltages;
/* Firmware Info */
cgs_get_firmware_info get_firmware_info;
cgs_rel_firmware rel_firmware;
@@ -696,12 +459,6 @@ struct cgs_device
#define CGS_OS_CALL(func,dev,...) \
(((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__))
-#define cgs_gpu_mem_info(dev,type,mc_start,mc_size,mem_size) \
- CGS_CALL(gpu_mem_info,dev,type,mc_start,mc_size,mem_size)
-#define cgs_gmap_kmem(dev,kmem,size,min_off,max_off,kmem_handle,mcaddr) \
- CGS_CALL(gmap_kmem,dev,kmem,size,min_off,max_off,kmem_handle,mcaddr)
-#define cgs_gunmap_kmem(dev,kmem_handle) \
- CGS_CALL(gunmap_kmem,dev,keme_handle)
#define cgs_alloc_gpu_mem(dev,type,size,align,min_off,max_off,handle) \
CGS_CALL(alloc_gpu_mem,dev,type,size,align,min_off,max_off,handle)
#define cgs_free_gpu_mem(dev,handle) \
@@ -724,19 +481,6 @@ struct cgs_device
#define cgs_write_ind_register(dev,space,index,value) \
CGS_CALL(write_ind_register,dev,space,index,value)
-#define cgs_read_pci_config_byte(dev,addr) \
- CGS_CALL(read_pci_config_byte,dev,addr)
-#define cgs_read_pci_config_word(dev,addr) \
- CGS_CALL(read_pci_config_word,dev,addr)
-#define cgs_read_pci_config_dword(dev,addr) \
- CGS_CALL(read_pci_config_dword,dev,addr)
-#define cgs_write_pci_config_byte(dev,addr,value) \
- CGS_CALL(write_pci_config_byte,dev,addr,value)
-#define cgs_write_pci_config_word(dev,addr,value) \
- CGS_CALL(write_pci_config_word,dev,addr,value)
-#define cgs_write_pci_config_dword(dev,addr,value) \
- CGS_CALL(write_pci_config_dword,dev,addr,value)
-
#define cgs_atom_get_data_table(dev,table,size,frev,crev) \
CGS_CALL(atom_get_data_table,dev,table,size,frev,crev)
#define cgs_atom_get_cmd_table_revs(dev,table,frev,crev) \
@@ -744,20 +488,6 @@ struct cgs_device
#define cgs_atom_exec_cmd_table(dev,table,args) \
CGS_CALL(atom_exec_cmd_table,dev,table,args)
-#define cgs_create_pm_request(dev,request) \
- CGS_CALL(create_pm_request,dev,request)
-#define cgs_destroy_pm_request(dev,request) \
- CGS_CALL(destroy_pm_request,dev,request)
-#define cgs_set_pm_request(dev,request,active) \
- CGS_CALL(set_pm_request,dev,request,active)
-#define cgs_pm_request_clock(dev,request,clock,freq) \
- CGS_CALL(pm_request_clock,dev,request,clock,freq)
-#define cgs_pm_request_engine(dev,request,engine,powered) \
- CGS_CALL(pm_request_engine,dev,request,engine,powered)
-#define cgs_pm_query_clock_limits(dev,clock,limits) \
- CGS_CALL(pm_query_clock_limits,dev,clock,limits)
-#define cgs_set_camera_voltages(dev,mask,voltages) \
- CGS_CALL(set_camera_voltages,dev,mask,voltages)
#define cgs_get_firmware_info(dev, type, info) \
CGS_CALL(get_firmware_info, dev, type, info)
#define cgs_rel_firmware(dev, type) \
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 9da5b0b..f73e80c 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -251,7 +251,9 @@ static int pp_suspend(void *handle)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret == PP_DPM_DISABLED)
+ return 0;
+ else if (ret != 0)
return ret;
eventmgr = pp_handle->eventmgr;
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c
index 9ef2d90..b82c43a 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c
@@ -219,7 +219,7 @@ const pem_event_action notify_smu_suspend_tasks[] = {
};
const pem_event_action disable_smc_firmware_ctf_tasks[] = {
- /* PEM_Task_DisableSMCFirmwareCTF,*/
+ pem_task_disable_smc_firmware_ctf,
NULL
};
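
A subchain such as disable_smc_firmware_ctf_tasks is simply a NULL-terminated array of task callbacks that the event manager walks until one fails. A minimal model of that walk, with stand-in eventmgr/event_data types:

#include <stddef.h>

struct eventmgr;		/* stand-in for struct pp_eventmgr */
struct event_data;		/* stand-in for struct pem_event_data */

typedef int (*pem_task)(struct eventmgr *mgr, struct event_data *data);

/* Walk a NULL-terminated task chain, stopping at the first failure. */
static int run_subchain(const pem_task *chain, struct eventmgr *mgr,
			struct event_data *data)
{
	int ret = 0;

	for (; *chain && !ret; chain++)
		ret = (*chain)(mgr, data);
	return ret;
}

static int disable_ctf(struct eventmgr *mgr, struct event_data *data)
{
	(void)mgr; (void)data;
	return 0;	/* would call phm_disable_smc_firmware_ctf() */
}

static const pem_task demo_tasks[] = { disable_ctf, NULL };

int main(void)
{
	return run_subchain(demo_tasks, NULL, NULL);
}
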
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
index e04216e..8c4ebaa 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
@@ -173,6 +173,11 @@ int pem_task_stop_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_even
return 0;
}
+int pem_task_disable_smc_firmware_ctf(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
+{
+ return phm_disable_smc_firmware_ctf(eventmgr->hwmgr);
+}
+
int pem_task_setup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
return phm_setup_asic(eventmgr->hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h
index 6c6297e..37e7ca5 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h
@@ -84,5 +84,6 @@ int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, str
/*thermal */
int pem_task_initialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
int pem_task_uninitialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
+int pem_task_disable_smc_firmware_ctf(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
#endif /* _EVENT_TASKS_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 23bba2c..fcc722e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -501,3 +501,13 @@ int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_i
return hwmgr->hwmgr_func->get_max_high_clocks(hwmgr, clocks);
}
+
+int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (hwmgr->hwmgr_func->disable_smc_firmware_ctf == NULL)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->disable_smc_firmware_ctf(hwmgr);
+}
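
phm_disable_smc_firmware_ctf follows the usual PHM shape: validate the manager, then dispatch through an optional per-ASIC hook, returning -EINVAL when the backend does not implement it. Sketched below with stand-in types:

#include <errno.h>
#include <stddef.h>

struct hwmgr;			/* stand-in for struct pp_hwmgr */

struct hwmgr_funcs {
	int (*disable_smc_firmware_ctf)(struct hwmgr *hwmgr);
};

struct hwmgr {
	const struct hwmgr_funcs *funcs;
};

static int phm_disable_ctf(struct hwmgr *hwmgr)
{
	if (!hwmgr || !hwmgr->funcs)	/* the PHM_FUNC_CHECK step */
		return -EINVAL;
	if (!hwmgr->funcs->disable_smc_firmware_ctf)
		return -EINVAL;		/* backend lacks the hook */
	return hwmgr->funcs->disable_smc_firmware_ctf(hwmgr);
}

int main(void)
{
	return phm_disable_ctf(NULL) == -EINVAL ? 0 : 1;
}
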
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index b71525f..5602311 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -314,52 +314,45 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
le32_to_cpu(profile->gb_vdroop_table_ckson_a2);
param->ulGbFuseTableCksoffM1 =
le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m1);
- param->usGbFuseTableCksoffM2 =
+ param->ulGbFuseTableCksoffM2 =
le16_to_cpu(profile->avfsgb_fuse_table_cksoff_m2);
param->ulGbFuseTableCksoffB =
le32_to_cpu(profile->avfsgb_fuse_table_cksoff_b);
param->ulGbFuseTableCksonM1 =
le32_to_cpu(profile->avfsgb_fuse_table_ckson_m1);
- param->usGbFuseTableCksonM2 =
+ param->ulGbFuseTableCksonM2 =
le16_to_cpu(profile->avfsgb_fuse_table_ckson_m2);
param->ulGbFuseTableCksonB =
le32_to_cpu(profile->avfsgb_fuse_table_ckson_b);
- param->usMaxVoltage025mv =
- le16_to_cpu(profile->max_voltage_0_25mv);
- param->ucEnableGbVdroopTableCksoff =
- profile->enable_gb_vdroop_table_cksoff;
+
param->ucEnableGbVdroopTableCkson =
profile->enable_gb_vdroop_table_ckson;
- param->ucEnableGbFuseTableCksoff =
- profile->enable_gb_fuse_table_cksoff;
param->ucEnableGbFuseTableCkson =
profile->enable_gb_fuse_table_ckson;
param->usPsmAgeComfactor =
le16_to_cpu(profile->psm_age_comfactor);
- param->ucEnableApplyAvfsCksoffVoltage =
- profile->enable_apply_avfs_cksoff_voltage;
param->ulDispclk2GfxclkM1 =
le32_to_cpu(profile->dispclk2gfxclk_a);
- param->usDispclk2GfxclkM2 =
+ param->ulDispclk2GfxclkM2 =
le16_to_cpu(profile->dispclk2gfxclk_b);
param->ulDispclk2GfxclkB =
le32_to_cpu(profile->dispclk2gfxclk_c);
param->ulDcefclk2GfxclkM1 =
le32_to_cpu(profile->dcefclk2gfxclk_a);
- param->usDcefclk2GfxclkM2 =
+ param->ulDcefclk2GfxclkM2 =
le16_to_cpu(profile->dcefclk2gfxclk_b);
param->ulDcefclk2GfxclkB =
le32_to_cpu(profile->dcefclk2gfxclk_c);
param->ulPixelclk2GfxclkM1 =
le32_to_cpu(profile->pixclk2gfxclk_a);
- param->usPixelclk2GfxclkM2 =
+ param->ulPixelclk2GfxclkM2 =
le16_to_cpu(profile->pixclk2gfxclk_b);
param->ulPixelclk2GfxclkB =
le32_to_cpu(profile->pixclk2gfxclk_c);
param->ulPhyclk2GfxclkM1 =
le32_to_cpu(profile->phyclk2gfxclk_a);
- param->usPhyclk2GfxclkM2 =
+ param->ulPhyclk2GfxclkM2 =
le16_to_cpu(profile->phyclk2gfxclk_b);
param->ulPhyclk2GfxclkB =
le32_to_cpu(profile->phyclk2gfxclk_c);
@@ -394,3 +387,31 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
return 0;
}
+
+int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
+ struct pp_atomfwctrl_bios_boot_up_values *boot_values)
+{
+ struct atom_firmware_info_v3_1 *info = NULL;
+ uint16_t ix;
+
+ ix = GetIndexIntoMasterDataTable(firmwareinfo);
+ info = (struct atom_firmware_info_v3_1 *)
+ cgs_atom_get_data_table(hwmgr->device,
+ ix, NULL, NULL, NULL);
+
+ if (!info) {
+ pr_info("Error retrieving BIOS firmwareinfo!");
+ return -EINVAL;
+ }
+
+ boot_values->ulRevision = info->firmware_revision;
+ boot_values->ulGfxClk = info->bootup_sclk_in10khz;
+ boot_values->ulUClk = info->bootup_mclk_in10khz;
+ boot_values->ulSocClk = 0;
+ boot_values->usVddc = info->bootup_vddc_mv;
+ boot_values->usVddci = info->bootup_vddci_mv;
+ boot_values->usMvddc = info->bootup_mvddc_mv;
+ boot_values->usVddGfx = info->bootup_vddgfx_mv;
+
+ return 0;
+}
\ No newline at end of file

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index 7efe9b9..43a6711 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -69,7 +69,7 @@ struct pp_atomfwctrl_clock_dividers_soc15 {
struct pp_atomfwctrl_avfs_parameters {
uint32_t ulMaxVddc;
uint32_t ulMinVddc;
- uint8_t ucMaxVidStep;
+
uint32_t ulMeanNsigmaAcontant0;
uint32_t ulMeanNsigmaAcontant1;
uint32_t ulMeanNsigmaAcontant2;
@@ -82,30 +82,30 @@ struct pp_atomfwctrl_avfs_parameters {
uint32_t ulGbVdroopTableCksonA0;
uint32_t ulGbVdroopTableCksonA1;
uint32_t ulGbVdroopTableCksonA2;
+
uint32_t ulGbFuseTableCksoffM1;
- uint16_t usGbFuseTableCksoffM2;
- uint32_t ulGbFuseTableCksoffB;\
+ uint32_t ulGbFuseTableCksoffM2;
+ uint32_t ulGbFuseTableCksoffB;
+
uint32_t ulGbFuseTableCksonM1;
- uint16_t usGbFuseTableCksonM2;
+ uint32_t ulGbFuseTableCksonM2;
uint32_t ulGbFuseTableCksonB;
- uint16_t usMaxVoltage025mv;
- uint8_t ucEnableGbVdroopTableCksoff;
+
uint8_t ucEnableGbVdroopTableCkson;
- uint8_t ucEnableGbFuseTableCksoff;
uint8_t ucEnableGbFuseTableCkson;
uint16_t usPsmAgeComfactor;
- uint8_t ucEnableApplyAvfsCksoffVoltage;
+
uint32_t ulDispclk2GfxclkM1;
- uint16_t usDispclk2GfxclkM2;
+ uint32_t ulDispclk2GfxclkM2;
uint32_t ulDispclk2GfxclkB;
uint32_t ulDcefclk2GfxclkM1;
- uint16_t usDcefclk2GfxclkM2;
+ uint32_t ulDcefclk2GfxclkM2;
uint32_t ulDcefclk2GfxclkB;
uint32_t ulPixelclk2GfxclkM1;
- uint16_t usPixelclk2GfxclkM2;
+ uint32_t ulPixelclk2GfxclkM2;
uint32_t ulPixelclk2GfxclkB;
uint32_t ulPhyclk2GfxclkM1;
- uint16_t usPhyclk2GfxclkM2;
+ uint32_t ulPhyclk2GfxclkM2;
uint32_t ulPhyclk2GfxclkB;
};
@@ -119,6 +119,18 @@ struct pp_atomfwctrl_gpio_parameters {
uint8_t ucFwCtfGpio;
uint8_t ucFwCtfPolarity;
};
+
+struct pp_atomfwctrl_bios_boot_up_values {
+ uint32_t ulRevision;
+ uint32_t ulGfxClk;
+ uint32_t ulUClk;
+ uint32_t ulSocClk;
+ uint16_t usVddc;
+ uint16_t usVddci;
+ uint16_t usMvddc;
+ uint16_t usVddGfx;
+};
+
int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
uint32_t clock_type, uint32_t clock_value,
struct pp_atomfwctrl_clock_dividers_soc15 *dividers);
@@ -136,5 +148,8 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_gpio_parameters *param);
+int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
+ struct pp_atomfwctrl_bios_boot_up_values *boot_values);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 8f663ab..102eb6d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2655,6 +2655,28 @@ static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
return sizeof(struct smu7_power_state);
}
+static bool smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
+ uint32_t vblank_time_us)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t switch_limit_us;
+
+ switch (hwmgr->chip_id) {
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+ break;
+ default:
+ switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
+ break;
+ }
+
+	return vblank_time_us < switch_limit_us;
+}
+
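
Worked example of the check above: an mclk switch must complete inside vertical blank, so any mode whose vblank is shorter than the memory's switch latency disables switching. Illustrative numbers only (the 190 us figure is the Polaris GDDR5 limit from the switch statement above):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned total_lines = 1125, active_lines = 1080;
	unsigned line_time_ns = 6173;	/* ~1 / (144 Hz * 1125 lines) */
	unsigned vblank_us =
		(total_lines - active_lines) * line_time_ns / 1000;
	bool too_short = vblank_us < 190;	/* Polaris GDDR5 limit */

	printf("vblank %u us -> mclk switching %s\n",
	       vblank_us, too_short ? "disabled" : "allowed");
	return 0;
}
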
static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *request_ps,
@@ -2669,6 +2691,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
bool disable_mclk_switching;
bool disable_mclk_switching_for_frame_lock;
struct cgs_display_info info = {0};
+ struct cgs_mode_info mode_info = {0};
const struct phm_clock_and_voltage_limits *max_limits;
uint32_t i;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -2677,6 +2700,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
int32_t count;
int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
+ info.mode_info = &mode_info;
data->battery_state = (PP_StateUILabel_Battery ==
request_ps->classification.ui_label);
@@ -2703,8 +2727,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
cgs_get_active_displays_info(hwmgr->device, &info);
- /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
-
minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
@@ -2769,8 +2791,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
- disable_mclk_switching = (1 < info.display_count) ||
- disable_mclk_switching_for_frame_lock;
+ disable_mclk_switching = ((1 < info.display_count) ||
+ disable_mclk_switching_for_frame_lock ||
+ smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
+ (mode_info.refresh_rate > 120));
sclk = smu7_ps->performance_levels[0].engine_clock;
mclk = smu7_ps->performance_levels[0].memory_clock;
@@ -4334,26 +4358,31 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
static int smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
- if (mode) {
- /* stop auto-manage */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
- smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
- smu7_fan_ctrl_set_static_mode(hwmgr, mode);
- } else
- /* restart auto-manage */
- smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
+ int result = 0;
- return 0;
+ switch (mode) {
+ case AMD_FAN_CTRL_NONE:
+ result = smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+ break;
+ case AMD_FAN_CTRL_MANUAL:
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl))
+ result = smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
+ break;
+ case AMD_FAN_CTRL_AUTO:
+ result = smu7_fan_ctrl_set_static_mode(hwmgr, mode);
+ if (!result)
+ result = smu7_fan_ctrl_start_smc_fan_control(hwmgr);
+ break;
+ default:
+ break;
+ }
+ return result;
}
static int smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
- if (hwmgr->fan_ctrl_is_in_default_mode)
- return hwmgr->fan_ctrl_default_mode;
- else
- return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, FDO_PWM_MODE);
+ return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
}
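
Taken together, the two hunks above map the new amd_fan_ctrl_mode values onto SMC fan control: NONE pins the fan at 100%, MANUAL stops the SMC controller so the caller can set a speed, and AUTO re-arms static mode plus the SMC controller; the getter now reports AUTO purely from the fan_ctrl_enabled flag. A standalone summary of that mapping:

#include <stdio.h>

enum amd_fan_ctrl_mode {	/* mirrors the enum added in amd_shared.h */
	AMD_FAN_CTRL_NONE = 0,
	AMD_FAN_CTRL_MANUAL = 1,
	AMD_FAN_CTRL_AUTO = 2,
};

static const char *fan_mode_action(enum amd_fan_ctrl_mode mode)
{
	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		return "pin fan at 100%";
	case AMD_FAN_CTRL_MANUAL:
		return "stop SMC fan control; caller sets the speed";
	case AMD_FAN_CTRL_AUTO:
		return "static mode, then restart SMC fan control";
	default:
		return "ignored";
	}
}

int main(void)
{
	printf("%s\n", fan_mode_action(AMD_FAN_CTRL_AUTO));
	return 0;
}
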
static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
@@ -4522,32 +4551,6 @@ static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type
return 0;
}
-static int smu7_request_firmware(struct pp_hwmgr *hwmgr)
-{
- int ret;
- struct cgs_firmware_info info = {0};
-
- ret = cgs_get_firmware_info(hwmgr->device,
- smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
- &info);
- if (ret || !info.kptr)
- return -EINVAL;
-
- return 0;
-}
-
-static int smu7_release_firmware(struct pp_hwmgr *hwmgr)
-{
- int ret;
-
- ret = cgs_rel_firmware(hwmgr->device,
- smu7_convert_fw_type_to_cgs(UCODE_ID_SMU));
- if (ret)
- return -EINVAL;
-
- return 0;
-}
-
static void smu7_find_min_clock_masks(struct pp_hwmgr *hwmgr,
uint32_t *sclk_mask, uint32_t *mclk_mask,
uint32_t min_sclk, uint32_t min_mclk)
@@ -4691,10 +4694,9 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_clock_by_type = smu7_get_clock_by_type,
.read_sensor = smu7_read_sensor,
.dynamic_state_management_disable = smu7_disable_dpm_tasks,
- .request_firmware = smu7_request_firmware,
- .release_firmware = smu7_release_firmware,
.set_power_profile_state = smu7_set_power_profile_state,
.avfs_control = smu7_avfs_control,
+ .disable_smc_firmware_ctf = smu7_thermal_disable_alert,
};
uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
index 436ca5c..baddb56 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
@@ -112,10 +112,9 @@ int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
*/
int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
-
if (hwmgr->fan_ctrl_is_in_default_mode) {
hwmgr->fan_ctrl_default_mode =
- PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_FDO_CTRL2, FDO_PWM_MODE);
hwmgr->tmin =
PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -149,7 +148,7 @@ int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
return 0;
}
-static int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
+int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
int result;
@@ -179,6 +178,7 @@ static int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetFanTemperatureTarget,
hwmgr->thermal_controller.
advanceFanControlParameters.ucTargetTemperature);
+ hwmgr->fan_ctrl_enabled = true;
return result;
}
@@ -186,6 +186,7 @@ static int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{
+ hwmgr->fan_ctrl_enabled = false;
return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
}
@@ -280,7 +281,7 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
CG_TACH_STATUS, TACH_PERIOD, tach_period);
- return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+ return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM);
}
/**
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
index 2ed774d..ba71b60 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
@@ -54,6 +54,6 @@ extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *spe
extern int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr);
-
+extern int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 8394955..2614af2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -111,6 +111,8 @@ static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
data->registry_data.mclk_dpm_key_disabled =
hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
+ data->registry_data.pcie_dpm_key_disabled =
+ hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
data->registry_data.dcefclk_dpm_key_disabled =
hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;
@@ -121,7 +123,9 @@ static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.enable_tdc_limit_feature = 1;
}
- data->registry_data.pcie_dpm_key_disabled = 1;
+ data->registry_data.clock_stretcher_support =
+ hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? false : true;
+
data->registry_data.disable_water_mark = 0;
data->registry_data.fan_control_support = 1;
@@ -1133,7 +1137,7 @@ static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
int i;
for (i = 0; i < dep_table->count; i++) {
- if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value !=
+ if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
dep_table->entries[i].clk) {
dpm_table->dpm_levels[dpm_table->count].value =
dep_table->entries[i].clk;
@@ -1178,29 +1182,9 @@ static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
else
pcie_table->lclk[i] =
bios_pcie_table->entries[i].pcie_sclk;
-
- pcie_table->count++;
}
- if (data->registry_data.pcieSpeedOverride)
- pcie_table->pcie_gen[i] = data->registry_data.pcieSpeedOverride;
- else
- pcie_table->pcie_gen[i] =
- bios_pcie_table->entries[bios_pcie_table->count - 1].gen_speed;
-
- if (data->registry_data.pcieLaneOverride)
- pcie_table->pcie_lane[i] = data->registry_data.pcieLaneOverride;
- else
- pcie_table->pcie_lane[i] =
- bios_pcie_table->entries[bios_pcie_table->count - 1].lane_width;
-
- if (data->registry_data.pcieClockOverride)
- pcie_table->lclk[i] = data->registry_data.pcieClockOverride;
- else
- pcie_table->lclk[i] =
- bios_pcie_table->entries[bios_pcie_table->count - 1].pcie_sclk;
-
- pcie_table->count++;
+ pcie_table->count = NUM_LINK_LEVELS;
return 0;
}
@@ -1290,7 +1274,7 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
dpm_table = &(data->dpm_table.eclk_table);
for (i = 0; i < dep_mm_table->count; i++) {
if (i == 0 || dpm_table->dpm_levels
- [dpm_table->count - 1].value !=
+ [dpm_table->count - 1].value <=
dep_mm_table->entries[i].eclk) {
dpm_table->dpm_levels[dpm_table->count].value =
dep_mm_table->entries[i].eclk;
@@ -1306,7 +1290,7 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
dpm_table = &(data->dpm_table.vclk_table);
for (i = 0; i < dep_mm_table->count; i++) {
if (i == 0 || dpm_table->dpm_levels
- [dpm_table->count - 1].value !=
+ [dpm_table->count - 1].value <=
dep_mm_table->entries[i].vclk) {
dpm_table->dpm_levels[dpm_table->count].value =
dep_mm_table->entries[i].vclk;
@@ -1320,7 +1304,7 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
dpm_table = &(data->dpm_table.dclk_table);
for (i = 0; i < dep_mm_table->count; i++) {
if (i == 0 || dpm_table->dpm_levels
- [dpm_table->count - 1].value !=
+ [dpm_table->count - 1].value <=
dep_mm_table->entries[i].dclk) {
dpm_table->dpm_levels[dpm_table->count].value =
dep_mm_table->entries[i].dclk;
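
The `!=` to `<=` change in the loops above tightens DPM level construction: the old test only skipped exact repeats, so a lower clock following a higher one still got appended, while the new test drops any entry lower than the last, keeping the table non-decreasing (exact repeats now pass, out-of-order lower clocks do not). A standalone model:

#include <stdio.h>

int main(void)
{
	unsigned clk[] = { 300, 600, 600, 500, 900 };	/* sample entries */
	unsigned levels[8];
	unsigned count = 0, i;

	for (i = 0; i < sizeof(clk) / sizeof(clk[0]); i++) {
		/* keep only non-decreasing clocks, as the <= test does */
		if (i == 0 || levels[count - 1] <= clk[i])
			levels[count++] = clk[i];
	}
	for (i = 0; i < count; i++)
		printf("%u ", levels[i]);	/* 300 600 600 900 */
	printf("\n");
	return 0;
}
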
@@ -1432,9 +1416,7 @@ static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
(struct phm_ppt_v2_information *)(hwmgr->pptable);
data->smc_state_table.pp_table.UlvOffsetVid =
- (uint8_t)(table_info->us_ulv_voltage_offset *
- VOLTAGE_VID_OFFSET_SCALE2 /
- VOLTAGE_VID_OFFSET_SCALE1);
+ (uint8_t)table_info->us_ulv_voltage_offset;
data->smc_state_table.pp_table.UlvSmnclkDid =
(uint8_t)(table_info->us_ulv_smnclk_did);
@@ -1553,7 +1535,11 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
current_gfxclk_level->FbMult =
cpu_to_le32(dividers.ulPll_fb_mult);
/* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
- current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport))
+ current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
+ else
+ current_gfxclk_level->SsOn = 0;
current_gfxclk_level->SsFbMult =
cpu_to_le32(dividers.ulPll_ss_fbsmult);
current_gfxclk_level->SsSlewFrac =
@@ -2044,10 +2030,10 @@ static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
table_info->vdd_dep_on_sclk;
uint32_t i;
- for (i = 0; dep_table->count; i++) {
+ for (i = 0; i < dep_table->count; i++) {
pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
- pp_table->CksVidOffset[i] = convert_to_vid(
- dep_table->entries[i].cks_voffset);
+ pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
+ * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
}
return 0;
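
Two fixes above: the loop condition `for (i = 0; dep_table->count; i++)` never terminated for a non-empty table because the test ignored i, and the VID offset is now scaled explicitly rather than going through convert_to_vid. Assuming the customary smu7 scale factors (VOLTAGE_VID_OFFSET_SCALE1 = 625, VOLTAGE_VID_OFFSET_SCALE2 = 100), the conversion divides a millivolt offset into 6.25 mV VID steps:

#include <stdint.h>
#include <stdio.h>

#define VOLTAGE_VID_OFFSET_SCALE1 625	/* assumed, per smu7 headers */
#define VOLTAGE_VID_OFFSET_SCALE2 100

int main(void)
{
	uint16_t voffset_mv = 50;	/* example 50 mV offset */
	uint8_t vid = (uint8_t)(voffset_mv * VOLTAGE_VID_OFFSET_SCALE2 /
				VOLTAGE_VID_OFFSET_SCALE1);

	printf("offset %u mV -> %u VID steps\n", voffset_mv, vid);	/* 8 */
	return 0;
}
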
@@ -2073,66 +2059,70 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
if (!result) {
pp_table->MinVoltageVid = (uint8_t)
- convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));
- pp_table->MaxVoltageVid = (uint8_t)
convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
- pp_table->BtcGbVdroopTableCksOn.a0 =
- cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
- pp_table->BtcGbVdroopTableCksOn.a1 =
- cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
- pp_table->BtcGbVdroopTableCksOn.a2 =
- cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
+ pp_table->MaxVoltageVid = (uint8_t)
+ convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));
+
+ pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
+ pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
+ pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
+ pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
+ pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
+ pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
+ pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);
pp_table->BtcGbVdroopTableCksOff.a0 =
cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
+ pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
pp_table->BtcGbVdroopTableCksOff.a1 =
cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
+ pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
pp_table->BtcGbVdroopTableCksOff.a2 =
cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
+ pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;
+
+ pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
+ pp_table->BtcGbVdroopTableCksOn.a0 =
+ cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
+ pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
+ pp_table->BtcGbVdroopTableCksOn.a1 =
+ cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
+ pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
+ pp_table->BtcGbVdroopTableCksOn.a2 =
+ cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
+ pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;
pp_table->AvfsGbCksOn.m1 =
cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
pp_table->AvfsGbCksOn.m2 =
- cpu_to_le16(avfs_params.usGbFuseTableCksonM2);
+ cpu_to_le16(avfs_params.ulGbFuseTableCksonM2);
pp_table->AvfsGbCksOn.b =
cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
pp_table->AvfsGbCksOn.m1_shift = 24;
pp_table->AvfsGbCksOn.m2_shift = 12;
+ pp_table->AvfsGbCksOn.b_shift = 0;
+ pp_table->OverrideAvfsGbCksOn =
+ avfs_params.ucEnableGbFuseTableCkson;
pp_table->AvfsGbCksOff.m1 =
cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
pp_table->AvfsGbCksOff.m2 =
- cpu_to_le16(avfs_params.usGbFuseTableCksoffM2);
+ cpu_to_le16(avfs_params.ulGbFuseTableCksoffM2);
pp_table->AvfsGbCksOff.b =
cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
pp_table->AvfsGbCksOff.m1_shift = 24;
pp_table->AvfsGbCksOff.m2_shift = 12;
-
- pp_table->AConstant[0] =
- cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
- pp_table->AConstant[1] =
- cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
- pp_table->AConstant[2] =
- cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
- pp_table->DC_tol_sigma =
- cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
- pp_table->Platform_mean =
- cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
- pp_table->PSM_Age_CompFactor =
- cpu_to_le16(avfs_params.usPsmAgeComfactor);
- pp_table->Platform_sigma =
- cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
-
- for (i = 0; i < dep_table->count; i++)
- pp_table->StaticVoltageOffsetVid[i] = (uint8_t)
- (dep_table->entries[i].sclk_offset *
+ pp_table->AvfsGbCksOff.b_shift = 0;
+
+ for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].sclk_offset == 0)
+ pp_table->StaticVoltageOffsetVid[i] = 248;
+ else
+ pp_table->StaticVoltageOffsetVid[i] =
+ (uint8_t)(dep_table->entries[i].sclk_offset *
VOLTAGE_VID_OFFSET_SCALE2 /
VOLTAGE_VID_OFFSET_SCALE1);
-
- pp_table->OverrideBtcGbCksOn =
- avfs_params.ucEnableGbVdroopTableCkson;
- pp_table->OverrideAvfsGbCksOn =
- avfs_params.ucEnableGbFuseTableCkson;
+ }
if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->disp_clk_quad_eqn_a) &&
@@ -2141,20 +2131,21 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
(int32_t)data->disp_clk_quad_eqn_a;
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
- (int16_t)data->disp_clk_quad_eqn_b;
+ (int32_t)data->disp_clk_quad_eqn_b;
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
(int32_t)data->disp_clk_quad_eqn_c;
} else {
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
(int32_t)avfs_params.ulDispclk2GfxclkM1;
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
- (int16_t)avfs_params.usDispclk2GfxclkM2;
+ (int32_t)avfs_params.ulDispclk2GfxclkM2;
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
(int32_t)avfs_params.ulDispclk2GfxclkB;
}
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
+ pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;
if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->dcef_clk_quad_eqn_a) &&
@@ -2163,20 +2154,21 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
(int32_t)data->dcef_clk_quad_eqn_a;
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
- (int16_t)data->dcef_clk_quad_eqn_b;
+ (int32_t)data->dcef_clk_quad_eqn_b;
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
(int32_t)data->dcef_clk_quad_eqn_c;
} else {
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
(int32_t)avfs_params.ulDcefclk2GfxclkM1;
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
- (int16_t)avfs_params.usDcefclk2GfxclkM2;
+ (int32_t)avfs_params.ulDcefclk2GfxclkM2;
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
(int32_t)avfs_params.ulDcefclk2GfxclkB;
}
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
+ pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;
if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->pixel_clk_quad_eqn_a) &&
@@ -2185,21 +2177,21 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
(int32_t)data->pixel_clk_quad_eqn_a;
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
- (int16_t)data->pixel_clk_quad_eqn_b;
+ (int32_t)data->pixel_clk_quad_eqn_b;
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
(int32_t)data->pixel_clk_quad_eqn_c;
} else {
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
(int32_t)avfs_params.ulPixelclk2GfxclkM1;
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
- (int16_t)avfs_params.usPixelclk2GfxclkM2;
+ (int32_t)avfs_params.ulPixelclk2GfxclkM2;
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
(int32_t)avfs_params.ulPixelclk2GfxclkB;
}
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
-
+ pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;
if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->phy_clk_quad_eqn_a) &&
(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
@@ -2207,20 +2199,21 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
(int32_t)data->phy_clk_quad_eqn_a;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
- (int16_t)data->phy_clk_quad_eqn_b;
+ (int32_t)data->phy_clk_quad_eqn_b;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
(int32_t)data->phy_clk_quad_eqn_c;
} else {
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
(int32_t)avfs_params.ulPhyclk2GfxclkM1;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
- (int16_t)avfs_params.usPhyclk2GfxclkM2;
+ (int32_t)avfs_params.ulPhyclk2GfxclkM2;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
(int32_t)avfs_params.ulPhyclk2GfxclkB;
}
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
+ pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
} else {
data->smu_features[GNLD_AVFS].supported = false;
}
@@ -2309,6 +2302,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
(struct phm_ppt_v2_information *)(hwmgr->pptable);
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct pp_atomfwctrl_voltage_table voltage_table;
+ struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
result = vega10_setup_default_dpm_tables(hwmgr);
PP_ASSERT_WITH_CODE(!result,
@@ -2331,6 +2325,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
(uint8_t)(table_info->uc_vce_dpm_voltage_mode);
pp_table->Mp0DpmVoltageMode =
(uint8_t)(table_info->uc_mp0_dpm_voltage_mode);
+
pp_table->DisplayDpmVoltageMode =
(uint8_t)(table_info->uc_dcef_dpm_voltage_mode);
@@ -2372,14 +2367,31 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
"Failed to initialize UVD Level!",
return result);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
+ if (data->registry_data.clock_stretcher_support) {
result = vega10_populate_clock_stretcher_table(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to populate Clock Stretcher Table!",
return result);
}
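+	/* Cache the VBIOS boot-up voltages and clocks so later DPM setup can
+	 * fall back to the boot state. */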
+ result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
+ if (!result) {
+ data->vbios_boot_state.vddc = boot_up_values.usVddc;
+ data->vbios_boot_state.vddci = boot_up_values.usVddci;
+ data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
+ data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
+ data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
+ data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
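+			/* SetFloorSocVoltage appears to take the floor in
+			 * 0.25 mV steps, hence VDDC (mV) * 4 (assumption). */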
+ if (0 != boot_up_values.usVddc) {
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFloorSocVoltage,
+ (boot_up_values.usVddc * 4));
+ data->vbios_boot_state.bsoc_vddc_lock = true;
+ } else {
+ data->vbios_boot_state.bsoc_vddc_lock = false;
+ }
+ }
+
result = vega10_populate_avfs_parameters(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize AVFS Parameters!",
@@ -2404,35 +2416,9 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
- if (data->smu_features[GNLD_AVFS].supported) {
- uint32_t features_enabled;
- result = vega10_get_smc_features(hwmgr->smumgr, &features_enabled);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to Retrieve Enabled Features!",
- return result);
- if (!(features_enabled & (1 << FEATURE_AVFS_BIT))) {
- result = vega10_perform_btc(hwmgr->smumgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to Perform BTC!",
- return result);
- result = vega10_avfs_enable(hwmgr, true);
- PP_ASSERT_WITH_CODE(!result,
- "Attempt to enable AVFS feature Failed!",
- return result);
- result = vega10_save_vft_table(hwmgr->smumgr,
- (uint8_t *)&(data->smc_state_table.avfs_table));
- PP_ASSERT_WITH_CODE(!result,
- "Attempt to save VFT table Failed!",
+ result = vega10_avfs_enable(hwmgr, true);
+ PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
return result);
- } else {
- data->smu_features[GNLD_AVFS].enabled = true;
- result = vega10_restore_vft_table(hwmgr->smumgr,
- (uint8_t *)&(data->smc_state_table.avfs_table));
- PP_ASSERT_WITH_CODE(!result,
- "Attempt to restore VFT table Failed!",
- return result;);
- }
- }
return 0;
}
@@ -2457,6 +2443,26 @@ static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
return 0;
}
+static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
+{
+ struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_THERMAL].supported) {
+ if (!data->smu_features[GNLD_THERMAL].enabled)
+ pr_info("THERMAL Feature Already disabled!");
+
+ PP_ASSERT_WITH_CODE(
+ !vega10_enable_smc_features(hwmgr->smumgr,
+ false,
+ data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
+ "disable THERMAL Feature Failed!",
+ return -1);
+ data->smu_features[GNLD_THERMAL].enabled = false;
+ }
+
+ return 0;
+}
+
static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data =
@@ -2535,6 +2541,37 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
return 0;
}
+static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
+{
+ struct vega10_hwmgr *data =
+ (struct vega10_hwmgr *)(hwmgr->backend);
+ uint32_t i, feature_mask = 0;
+
+	if (data->smu_features[GNLD_LED_DISPLAY].supported) {
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
+ "Attempt to Enable LED DPM feature Failed!", return -EINVAL);
+ data->smu_features[GNLD_LED_DISPLAY].enabled = true;
+ }
+
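+	/* Gather every enabled DPM feature selected by @bitmap into one mask
+	 * and disable them all with a single SMC request. */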
+ for (i = 0; i < GNLD_DPM_MAX; i++) {
+ if (data->smu_features[i].smu_feature_bitmap & bitmap) {
+ if (data->smu_features[i].supported) {
+ if (data->smu_features[i].enabled) {
+ feature_mask |= data->smu_features[i].
+ smu_feature_bitmap;
+ data->smu_features[i].enabled = false;
+ }
+ }
+ }
+ }
+
+ vega10_enable_smc_features(hwmgr->smumgr, false, feature_mask);
+
+ return 0;
+}
+
/**
* @brief Tell SMC to enabled the supported DPMs.
*
@@ -2576,6 +2613,12 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
data->smu_features[GNLD_LED_DISPLAY].enabled = true;
}
+ if (data->vbios_boot_state.bsoc_vddc_lock) {
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFloorSocVoltage, 0);
+ data->vbios_boot_state.bsoc_vddc_lock = false;
+ }
+
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_Falcon_QuickTransition)) {
if (data->smu_features[GNLD_ACDC].supported) {
@@ -2602,8 +2645,6 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"Failed to configure telemetry!",
return tmp_result);
- vega10_set_tools_address(hwmgr->smumgr);
-
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_NumOfDisplays, 0);
@@ -3880,32 +3921,36 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
- if (mode) {
- /* stop auto-manage */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
- vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
- vega10_fan_ctrl_set_static_mode(hwmgr, mode);
- } else
- /* restart auto-manage */
- vega10_fan_ctrl_reset_fan_speed_to_default(hwmgr);
+ int result = 0;
- return 0;
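+	/* AMD_FAN_CTRL_NONE pins the fan at 100%, MANUAL stops the SMC fan
+	 * loop so the driver can drive it, and AUTO re-arms the SMC's
+	 * automatic fan control. */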
+ switch (mode) {
+ case AMD_FAN_CTRL_NONE:
+ result = vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+ break;
+ case AMD_FAN_CTRL_MANUAL:
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl))
+ result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
+ break;
+ case AMD_FAN_CTRL_AUTO:
+ result = vega10_fan_ctrl_set_static_mode(hwmgr, mode);
+ if (!result)
+ result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
+ break;
+ default:
+ break;
+ }
+ return result;
}
static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
- uint32_t reg;
+ struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
- if (hwmgr->fan_ctrl_is_in_default_mode) {
- return hwmgr->fan_ctrl_default_mode;
- } else {
- reg = soc15_get_register_offset(THM_HWID, 0,
- mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2);
- return (cgs_read_register(hwmgr->device, reg) &
- CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >>
- CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
- }
+	if (!data->smu_features[GNLD_FAN_CONTROL].enabled)
+ return AMD_FAN_CTRL_MANUAL;
+ else
+ return AMD_FAN_CTRL_AUTO;
}
static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
@@ -4141,62 +4186,63 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
- uint32_t i;
+ int i;
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
return -EINVAL;
switch (type) {
case PP_SCLK:
- if (data->registry_data.sclk_dpm_key_disabled)
- break;
-
for (i = 0; i < 32; i++) {
if (mask & (1 << i))
break;
}
+ data->smc_state_table.gfx_boot_level = i;
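+		/* The lowest set bit in @mask (found above) is the soft-min
+		 * level; scan down for the highest set bit as the soft-max. */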
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- PPSMC_MSG_SetSoftMinGfxclkByIndex,
- i),
- "Failed to set soft min sclk index!",
- return -1);
+ for (i = 31; i >= 0; i--) {
+ if (mask & (1 << i))
+ break;
+ }
+ data->smc_state_table.gfx_max_level = i;
+
+ PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
+ "Failed to upload boot level to lowest!",
+ return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
+ "Failed to upload dpm max level to highest!",
+ return -EINVAL);
break;
case PP_MCLK:
- if (data->registry_data.mclk_dpm_key_disabled)
- break;
-
for (i = 0; i < 32; i++) {
if (mask & (1 << i))
break;
}
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- PPSMC_MSG_SetSoftMinUclkByIndex,
- i),
- "Failed to set soft min mclk index!",
- return -1);
- break;
-
- case PP_PCIE:
- if (data->registry_data.pcie_dpm_key_disabled)
- break;
-
for (i = 0; i < 32; i++) {
if (mask & (1 << i))
break;
}
+ data->smc_state_table.mem_boot_level = i;
+
+ for (i = 31; i >= 0; i--) {
+ if (mask & (1 << i))
+ break;
+ }
+ data->smc_state_table.mem_max_level = i;
+
+ PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
+ "Failed to upload boot level to lowest!",
+ return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
+ "Failed to upload dpm max level to highest!",
+ return -EINVAL);
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- PPSMC_MSG_SetMinLinkDpmByIndex,
- i),
- "Failed to set min pcie index!",
- return -1);
break;
+
+ case PP_PCIE:
default:
break;
}
@@ -4395,11 +4441,55 @@ vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmg
return is_update_required;
}
+static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ int tmp_result, result = 0;
+
+ tmp_result = (vega10_is_dpm_running(hwmgr)) ? 0 : -1;
+ PP_ASSERT_WITH_CODE(tmp_result == 0,
+ "DPM is not running right now, no need to disable DPM!",
+ return 0);
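+	/* Tear down in roughly the reverse order of enable: thermal
+	 * protection, power containment, AVFS, then the DPM features. */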
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalController))
+ vega10_disable_thermal_protection(hwmgr);
+
+ tmp_result = vega10_disable_power_containment(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable power containment!", result = tmp_result);
+
+ tmp_result = vega10_avfs_enable(hwmgr, false);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable AVFS!", result = tmp_result);
+
+ tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to stop DPM!", result = tmp_result);
+
+ return result;
+}
+
+static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
+{
+ struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ int result;
+
+ result = vega10_disable_dpm_tasks(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "[disable_dpm_tasks] Failed to disable DPM!",
+ );
+ data->water_marks_bitmap &= ~(WaterMarksLoaded);
+
+ return result;
+}
+
+
static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.backend_init = vega10_hwmgr_backend_init,
.backend_fini = vega10_hwmgr_backend_fini,
.asic_setup = vega10_setup_asic_task,
.dynamic_state_management_enable = vega10_enable_dpm_tasks,
+ .dynamic_state_management_disable = vega10_disable_dpm_tasks,
.get_num_of_pp_table_entries =
vega10_get_number_of_powerplay_table_entries,
.get_power_state_size = vega10_get_power_state_size,
@@ -4439,6 +4529,8 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.check_states_equal = vega10_check_states_equal,
.check_smc_update_required_for_display_configuration =
vega10_check_smc_update_required_for_display_configuration,
+ .power_off_asic = vega10_power_off_asic,
+ .disable_smc_firmware_ctf = vega10_thermal_disable_alert,
};
int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index 83c67b9..1912e08 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -177,8 +177,11 @@ struct vega10_dpmlevel_enable_mask {
};
struct vega10_vbios_boot_state {
+ bool bsoc_vddc_lock;
uint16_t vddc;
uint16_t vddci;
+ uint16_t mvddc;
+ uint16_t vdd_gfx;
uint32_t gfx_clock;
uint32_t mem_clock;
uint32_t soc_clock;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index f1e244c..3f72268 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -48,8 +48,8 @@ void vega10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
table->Tliquid1Limit = cpu_to_le16(tdp_table->usTemperatureLimitLiquid1);
table->Tliquid2Limit = cpu_to_le16(tdp_table->usTemperatureLimitLiquid2);
table->TplxLimit = cpu_to_le16(tdp_table->usTemperatureLimitPlx);
- table->LoadLineResistance = cpu_to_le16(
- hwmgr->platform_descriptor.LoadLineSlope);
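+	/* The SMC appears to expect the load line as a fixed-point value
+	 * scaled by 256 rather than a raw le16 slope (assumption). */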
+ table->LoadLineResistance =
+ hwmgr->platform_descriptor.LoadLineSlope * 256;
table->FitLimit = 0; /* Not used for Vega10 */
table->Liquid1_I2C_address = tdp_table->ucLiquid1_I2C_address;
@@ -113,6 +113,29 @@ int vega10_enable_power_containment(struct pp_hwmgr *hwmgr)
return result;
}
+int vega10_disable_power_containment(struct pp_hwmgr *hwmgr)
+{
+ struct vega10_hwmgr *data =
+ (struct vega10_hwmgr *)(hwmgr->backend);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (data->smu_features[GNLD_PPT].supported)
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ false, data->smu_features[GNLD_PPT].smu_feature_bitmap),
+ "Attempt to disable PPT feature Failed!",
+ data->smu_features[GNLD_PPT].supported = false);
+
+ if (data->smu_features[GNLD_TDC].supported)
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ false, data->smu_features[GNLD_TDC].smu_feature_bitmap),
+ "Attempt to disable PPT feature Failed!",
+ data->smu_features[GNLD_TDC].supported = false);
+ }
+
+ return 0;
+}
+
static int vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
uint32_t adjust_percent)
{
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h
index d9662bf..9ecaa27 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h
@@ -60,6 +60,7 @@ int vega10_enable_smc_cac(struct pp_hwmgr *hwmgr);
int vega10_enable_power_containment(struct pp_hwmgr *hwmgr);
int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
int vega10_power_control_set_level(struct pp_hwmgr *hwmgr);
+int vega10_disable_power_containment(struct pp_hwmgr *hwmgr);
#endif /* _VEGA10_POWERTUNE_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index 8b55ae0..00e9551 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -407,7 +407,7 @@ static int get_tdp_table(
tdp_table->ucPlx_I2C_address = power_tune_table->ucPlx_I2C_address;
tdp_table->ucPlx_I2C_Line = power_tune_table->ucPlx_I2C_LineSCL;
tdp_table->ucPlx_I2C_LineSDA = power_tune_table->ucPlx_I2C_LineSDA;
- hwmgr->platform_descriptor.LoadLineSlope = power_tune_table->usLoadLineResistance;
+ hwmgr->platform_descriptor.LoadLineSlope = le16_to_cpu(power_tune_table->usLoadLineResistance);
} else {
power_tune_table_v2 = (ATOM_Vega10_PowerTune_Table_V2 *)table;
tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table_v2->usSocketPowerLimit);
@@ -453,7 +453,7 @@ static int get_tdp_table(
tdp_table->ucPlx_I2C_LineSDA = sda;
hwmgr->platform_descriptor.LoadLineSlope =
- power_tune_table_v2->usLoadLineResistance;
+ le16_to_cpu(power_tune_table_v2->usLoadLineResistance);
}
*info_tdp_table = tdp_table;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index f4d77b6..83e40fe 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -381,14 +381,10 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
temp = cgs_read_register(hwmgr->device, reg);
- temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
- CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
+ temp = (temp & CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK) >>
+ CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT;
- /* Bit 9 means the reading is lower than the lowest usable value. */
- if (temp & 0x200)
- temp = VEGA10_THERMAL_MAXIMUM_TEMP_READING;
- else
- temp = temp & 0x1ff;
+ temp = temp & 0x1ff;
temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
@@ -424,23 +420,28 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
mmTHM_THERMAL_INT_CTRL_BASE_IDX, mmTHM_THERMAL_INT_CTRL);
val = cgs_read_register(hwmgr->device, reg);
- val &= ~(THM_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK);
- val |= (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) <<
- THM_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT;
- val &= ~(THM_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
- val |= (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) <<
- THM_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT;
+
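+	/* Cap thermal interrupts at 5 IH credits, route them through the
+	 * hardware IH, program the high/low trip points in degrees C, and
+	 * unmask the trigger. */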
+ val &= (~THM_THERMAL_INT_CTRL__MAX_IH_CREDIT_MASK);
+ val |= (5 << THM_THERMAL_INT_CTRL__MAX_IH_CREDIT__SHIFT);
+
+ val &= (~THM_THERMAL_INT_CTRL__THERM_IH_HW_ENA_MASK);
+ val |= (1 << THM_THERMAL_INT_CTRL__THERM_IH_HW_ENA__SHIFT);
+
+ val &= (~THM_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK);
+ val |= ((high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
+ << THM_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT);
+
+ val &= (~THM_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
+ val |= ((low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
+ << THM_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT);
+
+ val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+
cgs_write_register(hwmgr->device, reg, val);
reg = soc15_get_register_offset(THM_HWID, 0,
mmTHM_TCON_HTC_BASE_IDX, mmTHM_TCON_HTC);
- val = cgs_read_register(hwmgr->device, reg);
- val &= ~(THM_TCON_HTC__HTC_TMP_LMT_MASK);
- val |= (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) <<
- THM_TCON_HTC__HTC_TMP_LMT__SHIFT;
- cgs_write_register(hwmgr->device, reg, val);
-
return 0;
}
@@ -482,18 +483,28 @@ static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr)
static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ uint32_t val = 0;
+ uint32_t reg;
if (data->smu_features[GNLD_FW_CTF].supported) {
if (data->smu_features[GNLD_FW_CTF].enabled)
printk("[Thermal_EnableAlert] FW CTF Already Enabled!\n");
+
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ true,
+ data->smu_features[GNLD_FW_CTF].smu_feature_bitmap),
+ "Attempt to Enable FW CTF feature Failed!",
+ return -1);
+ data->smu_features[GNLD_FW_CTF].enabled = true;
}
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
- true,
- data->smu_features[GNLD_FW_CTF].smu_feature_bitmap),
- "Attempt to Enable FW CTF feature Failed!",
- return -1);
- data->smu_features[GNLD_FW_CTF].enabled = true;
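+	/* Write the *_CLR bits to clear any latched high/low/trigger
+	 * interrupt status before alerts are re-enabled. */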
+ val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
+ val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
+ val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
+
+ reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA);
+ cgs_write_register(hwmgr->device, reg, val);
+
return 0;
}
@@ -501,21 +512,27 @@ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
* Disable thermal alerts on the Vega10 thermal controller.
* @param hwmgr The address of the hardware manager.
*/
-static int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
+int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+ uint32_t reg;
if (data->smu_features[GNLD_FW_CTF].supported) {
if (!data->smu_features[GNLD_FW_CTF].enabled)
printk("[Thermal_EnableAlert] FW CTF Already disabled!\n");
- }
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
false,
data->smu_features[GNLD_FW_CTF].smu_feature_bitmap),
"Attempt to disable FW CTF feature Failed!",
return -1);
- data->smu_features[GNLD_FW_CTF].enabled = false;
+ data->smu_features[GNLD_FW_CTF].enabled = false;
+ }
+
+ reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA);
+ cgs_write_register(hwmgr->device, reg, 0);
+
return 0;
}
@@ -561,6 +578,11 @@ int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
table->FanTargetTemperature = hwmgr->thermal_controller.
advanceFanControlParameters.usTMax;
+
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFanTemperatureTarget,
+ (uint32_t)table->FanTargetTemperature);
+
table->FanPwmMin = hwmgr->thermal_controller.
advanceFanControlParameters.usPWMMin * 255 / 100;
table->FanTargetGfxclk = (uint16_t)(hwmgr->thermal_controller.
@@ -687,17 +709,17 @@ static int tf_vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
static struct phm_master_table_item
vega10_thermal_start_thermal_controller_master_list[] = {
- {NULL, tf_vega10_thermal_initialize},
- {NULL, tf_vega10_thermal_set_temperature_range},
- {NULL, tf_vega10_thermal_enable_alert},
+ { .tableFunction = tf_vega10_thermal_initialize },
+ { .tableFunction = tf_vega10_thermal_set_temperature_range },
+ { .tableFunction = tf_vega10_thermal_enable_alert },
/* We should restrict performance levels to low before we halt the SMC.
* On the other hand we are still in boot state when we do this
* so it would be pointless.
* If this assumption changes we have to revisit this table.
*/
- {NULL, tf_vega10_thermal_setup_fan_table},
- {NULL, tf_vega10_thermal_start_smc_fan_control},
- {NULL, NULL}
+ { .tableFunction = tf_vega10_thermal_setup_fan_table },
+ { .tableFunction = tf_vega10_thermal_start_smc_fan_control },
+ { }
};
static struct phm_master_table_header
@@ -709,10 +731,10 @@ vega10_thermal_start_thermal_controller_master = {
static struct phm_master_table_item
vega10_thermal_set_temperature_range_master_list[] = {
- {NULL, tf_vega10_thermal_disable_alert},
- {NULL, tf_vega10_thermal_set_temperature_range},
- {NULL, tf_vega10_thermal_enable_alert},
- {NULL, NULL}
+ { .tableFunction = tf_vega10_thermal_disable_alert },
+ { .tableFunction = tf_vega10_thermal_set_temperature_range },
+ { .tableFunction = tf_vega10_thermal_enable_alert },
+ { }
};
struct phm_master_table_header
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
index 8036808..776f3a2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
@@ -78,6 +78,8 @@ extern int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr,
uint32_t *speed);
extern int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
extern uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr);
+extern int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr);
+int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 5345b50..a1ebe10 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -431,6 +431,6 @@ extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock);
extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
-
+extern int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr);
#endif /* _HARDWARE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 320225d..805b9df 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -368,11 +368,10 @@ struct pp_hwmgr_func {
int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, void *value, int *size);
- int (*request_firmware)(struct pp_hwmgr *hwmgr);
- int (*release_firmware)(struct pp_hwmgr *hwmgr);
int (*set_power_profile_state)(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request);
int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable);
+ int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr);
};
struct pp_table_func {
@@ -765,6 +764,7 @@ struct pp_hwmgr {
struct pp_thermal_controller_info thermal_controller;
bool fan_ctrl_is_in_default_mode;
uint32_t fan_ctrl_default_mode;
+ bool fan_ctrl_enabled;
uint32_t tmin;
struct phm_microcode_version_info microcode_version_info;
uint32_t ps_size;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
index 2037910..d43f98a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
@@ -30,7 +30,7 @@
* SMU TEAM: Always increment the interface version if
* any structure is changed in this file
*/
-#define SMU9_DRIVER_IF_VERSION 0xB
+#define SMU9_DRIVER_IF_VERSION 0xD
#define PPTABLE_V10_SMU_VERSION 1
@@ -302,7 +302,17 @@ typedef struct {
uint32_t DpmLevelPowerDelta;
- uint32_t Reserved[19];
+ uint8_t EnableBoostState;
+ uint8_t AConstant_Shift;
+ uint8_t DC_tol_sigma_Shift;
+ uint8_t PSM_Age_CompFactor_Shift;
+
+ uint16_t BoostStartTemperature;
+ uint16_t BoostStopTemperature;
+
+ PllSetting_t GfxBoostState;
+
+ uint32_t Reserved[14];
/* Padding - ignore */
uint32_t MmHubPadding[7]; /* SMU internal use */
@@ -464,4 +474,8 @@ typedef struct {
#define DB_PCC_SHIFT 26
#define DB_EDC_SHIFT 27
+#define REMOVE_FMAX_MARGIN_BIT 0x0
+#define REMOVE_DCTOL_MARGIN_BIT 0x1
+#define REMOVE_PLATFORM_MARGIN_BIT 0x2
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
index 90beef3..254974d 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
@@ -122,7 +122,10 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_SetFanMinPwm 0x52
#define PPSMC_MSG_ConfigureGfxDidt 0x55
#define PPSMC_MSG_NumOfDisplays 0x56
-#define PPSMC_Message_Count 0x57
+#define PPSMC_MSG_ReadSerialNumTop32 0x58
+#define PPSMC_MSG_ReadSerialNumBottom32 0x59
+#define PPSMC_Message_Count 0x5A
+
typedef int PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 2685f02..115f0e4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -74,18 +74,18 @@ static bool vega10_is_smc_ram_running(struct pp_smumgr *smumgr)
return false;
}
-/**
-* Check if SMC has responded to previous message.
-*
-* @param smumgr the address of the powerplay hardware manager.
-* @return TRUE SMC has responded, FALSE otherwise.
-*/
+/*
+ * Check if SMC has responded to previous message.
+ *
+ * @param smumgr the address of the powerplay hardware manager.
+ * @return the SMC response register value, or -EINVAL when the SMC is not running.
+ */
static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr)
{
uint32_t reg;
if (!vega10_is_smc_ram_running(smumgr))
- return -1;
+ return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
@@ -96,20 +96,19 @@ static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr)
return cgs_read_register(smumgr->device, reg);
}
-/**
-* Send a message to the SMC, and do not wait for its response.
-*
-* @param smumgr the address of the powerplay hardware manager.
-* @param msg the message to send.
+ * @return 0 on success; -EINVAL when the SMC is not running.
-*/
+/*
+ * Send a message to the SMC, and do not wait for its response.
+ * @param smumgr the address of the powerplay hardware manager.
+ * @param msg the message to send.
+ * @return Always return 0.
+ */
int vega10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr,
uint16_t msg)
{
uint32_t reg;
if (!vega10_is_smc_ram_running(smumgr))
- return -1;
+ return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
@@ -118,19 +117,18 @@ int vega10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr,
return 0;
}
-/**
-* Send a message to the SMC, and wait for its response.
-*
-* @param smumgr the address of the powerplay hardware manager.
-* @param msg the message to send.
-* @return The response that came from the SMC.
-*/
+/*
+ * Send a message to the SMC, and wait for its response.
+ * @param smumgr the address of the powerplay hardware manager.
+ * @param msg the message to send.
+ * @return 0 (send failures are only logged); -EINVAL when the SMC is not running.
+ */
int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
{
uint32_t reg;
if (!vega10_is_smc_ram_running(smumgr))
- return -1;
+ return -EINVAL;
vega10_wait_for_response(smumgr);
@@ -140,19 +138,18 @@ int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
vega10_send_msg_to_smc_without_waiting(smumgr, msg);
- PP_ASSERT_WITH_CODE(vega10_wait_for_response(smumgr) == 1,
- "Failed to send Message.",
- return -1);
+ if (vega10_wait_for_response(smumgr) != 1)
+ pr_err("Failed to send message: 0x%x\n", msg);
return 0;
}
-/**
+/*
* Send a message to the SMC with parameter
* @param smumgr: the address of the powerplay hardware manager.
* @param msg: the message to send.
* @param parameter: the parameter to send
- * @return The response that came from the SMC.
+ * @return 0 (send failures are only logged); -EINVAL when the SMC is not running.
*/
int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
uint16_t msg, uint32_t parameter)
@@ -160,7 +157,7 @@ int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
uint32_t reg;
if (!vega10_is_smc_ram_running(smumgr))
- return -1;
+ return -EINVAL;
vega10_wait_for_response(smumgr);
@@ -174,22 +171,20 @@ int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
vega10_send_msg_to_smc_without_waiting(smumgr, msg);
- PP_ASSERT_WITH_CODE(vega10_wait_for_response(smumgr) == 1,
- "Failed to send Message.",
- return -1);
+ if (vega10_wait_for_response(smumgr) != 1)
+ pr_err("Failed to send message: 0x%x\n", msg);
return 0;
}
-/**
-* Send a message to the SMC with parameter, do not wait for response
-*
-* @param smumgr: the address of the powerplay hardware manager.
-* @param msg: the message to send.
-* @param parameter: the parameter to send
-* @return The response that came from the SMC.
-*/
+/*
+ * Send a message to the SMC with parameter, do not wait for response
+ * @param smumgr: the address of the powerplay hardware manager.
+ * @param msg: the message to send.
+ * @param parameter: the parameter to send
+ * @return 0 on success; -EINVAL when the SMC is not running.
+ */
int vega10_send_msg_to_smc_with_parameter_without_waiting(
struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
{
@@ -202,13 +197,12 @@ int vega10_send_msg_to_smc_with_parameter_without_waiting(
return vega10_send_msg_to_smc_without_waiting(smumgr, msg);
}
-/**
-* Retrieve an argument from SMC.
-*
-* @param smumgr the address of the powerplay hardware manager.
-* @param arg pointer to store the argument from SMC.
-* @return Always return 0.
-*/
+/*
+ * Retrieve an argument from SMC.
+ * @param smumgr the address of the powerplay hardware manager.
+ * @param arg pointer to store the argument from SMC.
+ * @return Always return 0.
+ */
int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg)
{
uint32_t reg;
@@ -221,11 +215,11 @@ int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg)
return 0;
}
-/**
-* Copy table from SMC into driver FB
-* @param smumgr the address of the SMC manager
-* @param table_id the driver's table ID to copy from
-*/
+/*
+ * Copy table from SMC into driver FB
+ * @param smumgr the address of the SMC manager
+ * @param table_id the driver's table ID to copy from
+ */
int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
uint8_t *table, int16_t table_id)
{
@@ -233,25 +227,25 @@ int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
(struct vega10_smumgr *)(smumgr->backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
- "Invalid SMU Table ID!", return -1;);
+ "Invalid SMU Table ID!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
- "Invalid SMU Table version!", return -1;);
+ "Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
- "Invalid SMU Table Length!", return -1;);
+ "Invalid SMU Table Length!", return -EINVAL);
PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
- "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -1;);
+ "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL);
PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
- return -1;);
+ return -EINVAL);
PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
PPSMC_MSG_TransferTableSmu2Dram,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
- return -1;);
+ return -EINVAL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -259,11 +253,11 @@ int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
return 0;
}
-/**
-* Copy table from Driver FB into SMC
-* @param smumgr the address of the SMC manager
-* @param table_id the table to copy from
-*/
+/*
+ * Copy table from Driver FB into SMC
+ * @param smumgr the address of the SMC manager
+ * @param table_id the table to copy from
+ */
int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
uint8_t *table, int16_t table_id)
{
@@ -271,11 +265,11 @@ int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
(struct vega10_smumgr *)(smumgr->backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
- "Invalid SMU Table ID!", return -1;);
+ "Invalid SMU Table ID!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
- "Invalid SMU Table version!", return -1;);
+ "Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
- "Invalid SMU Table Length!", return -1;);
+ "Invalid SMU Table Length!", return -EINVAL);
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
@@ -284,27 +278,18 @@ int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
- return -1;);
+			return -EINVAL);
PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
- return -1;);
+ return -EINVAL);
PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
PPSMC_MSG_TransferTableDram2Smu,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
- return -1;);
-
- return 0;
-}
+ return -EINVAL);
-int vega10_perform_btc(struct pp_smumgr *smumgr)
-{
- PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc_with_parameter(
- smumgr, PPSMC_MSG_RunBtc, 0),
- "Attempt to run DC BTC Failed!",
- return -1);
return 0;
}
@@ -312,7 +297,7 @@ int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table)
{
PP_ASSERT_WITH_CODE(avfs_table,
"No access to SMC AVFS Table",
- return -1);
+ return -EINVAL);
return vega10_copy_table_from_smc(smumgr, avfs_table, AVFSTABLE);
}
@@ -321,7 +306,7 @@ int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table)
{
PP_ASSERT_WITH_CODE(avfs_table,
"No access to SMC AVFS Table",
- return -1);
+ return -EINVAL);
return vega10_copy_table_to_smc(smumgr, avfs_table, AVFSTABLE);
}
@@ -339,13 +324,16 @@ int vega10_enable_smc_features(struct pp_smumgr *smumgr,
int vega10_get_smc_features(struct pp_smumgr *smumgr,
uint32_t *features_enabled)
{
+ if (features_enabled == NULL)
+ return -EINVAL;
+
if (!vega10_send_msg_to_smc(smumgr,
PPSMC_MSG_GetEnabledSmuFeatures)) {
- if (!vega10_read_arg_from_smc(smumgr, features_enabled))
- return 0;
+ vega10_read_arg_from_smc(smumgr, features_enabled);
+ return 0;
}
- return -1;
+ return -EINVAL;
}
int vega10_set_tools_address(struct pp_smumgr *smumgr)
@@ -372,25 +360,20 @@ static int vega10_verify_smc_interface(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(smumgr,
PPSMC_MSG_GetDriverIfVersion),
"Attempt to get SMC IF Version Number Failed!",
- return -1);
- PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(smumgr,
- &smc_driver_if_version),
- "Attempt to read SMC IF Version Number Failed!",
- return -1);
-
- if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION)
- return -1;
+ return -EINVAL);
+ vega10_read_arg_from_smc(smumgr, &smc_driver_if_version);
+
+ if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) {
+ pr_err("Your firmware(0x%x) doesn't match \
+ SMU9_DRIVER_IF_VERSION(0x%x). \
+ Please update your firmware!\n",
+ smc_driver_if_version, SMU9_DRIVER_IF_VERSION);
+ return -EINVAL;
+ }
return 0;
}
-/**
-* Write a 32bit value to the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param smumgr the address of the powerplay hardware manager.
-* @param smc_addr the address in the SMC RAM to access.
-* @param value to write to the SMC SRAM.
-*/
static int vega10_smu_init(struct pp_smumgr *smumgr)
{
struct vega10_smumgr *priv;
@@ -427,7 +410,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
kfree(smumgr->backend);
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)handle);
- return -1);
+ return -EINVAL);
priv->smu_tables.entry[PPTABLE].version = 0x01;
priv->smu_tables.entry[PPTABLE].size = sizeof(PPTable_t);
@@ -455,7 +438,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)handle);
- return -1);
+ return -EINVAL);
priv->smu_tables.entry[WMTABLE].version = 0x01;
priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t);
@@ -485,7 +468,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)handle);
- return -1);
+ return -EINVAL);
priv->smu_tables.entry[AVFSTABLE].version = 0x01;
priv->smu_tables.entry[AVFSTABLE].size = sizeof(AvfsTable_t);
@@ -497,7 +480,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
priv->smu_tables.entry[AVFSTABLE].table = kaddr;
priv->smu_tables.entry[AVFSTABLE].handle = handle;
- tools_size = 0;
+ tools_size = 0x19000;
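+	/* 0x19000 (100 KiB) is presumably the scratch size the SMU tools/debug
+	 * table expects (assumption). */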
if (tools_size) {
smu_allocate_memory(smumgr->device,
tools_size,
@@ -517,9 +500,44 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
smu_lower_32_bits(mc_addr);
priv->smu_tables.entry[TOOLSTABLE].table = kaddr;
priv->smu_tables.entry[TOOLSTABLE].handle = handle;
+ vega10_set_tools_address(smumgr);
}
}
+ /* allocate space for AVFS Fuse table */
+ smu_allocate_memory(smumgr->device,
+ sizeof(AvfsFuseOverride_t),
+ CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
+ PAGE_SIZE,
+ &mc_addr,
+ &kaddr,
+ &handle);
+
+ PP_ASSERT_WITH_CODE(kaddr,
+ "[vega10_smu_init] Out of memory for avfs fuse table.",
+ kfree(smumgr->backend);
+ cgs_free_gpu_mem(smumgr->device,
+ (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
+ cgs_free_gpu_mem(smumgr->device,
+ (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
+ cgs_free_gpu_mem(smumgr->device,
+ (cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle);
+ cgs_free_gpu_mem(smumgr->device,
+ (cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle);
+ cgs_free_gpu_mem(smumgr->device,
+ (cgs_handle_t)handle);
+ return -EINVAL);
+
+ priv->smu_tables.entry[AVFSFUSETABLE].version = 0x01;
+ priv->smu_tables.entry[AVFSFUSETABLE].size = sizeof(AvfsFuseOverride_t);
+ priv->smu_tables.entry[AVFSFUSETABLE].table_id = TABLE_AVFS_FUSE_OVERRIDE;
+ priv->smu_tables.entry[AVFSFUSETABLE].table_addr_high =
+ smu_upper_32_bits(mc_addr);
+ priv->smu_tables.entry[AVFSFUSETABLE].table_addr_low =
+ smu_lower_32_bits(mc_addr);
+ priv->smu_tables.entry[AVFSFUSETABLE].table = kaddr;
+ priv->smu_tables.entry[AVFSFUSETABLE].handle = handle;
+
return 0;
}
@@ -538,6 +556,8 @@ static int vega10_smu_fini(struct pp_smumgr *smumgr)
if (priv->smu_tables.entry[TOOLSTABLE].table)
cgs_free_gpu_mem(smumgr->device,
(cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle);
+ cgs_free_gpu_mem(smumgr->device,
+ (cgs_handle_t)priv->smu_tables.entry[AVFSFUSETABLE].handle);
kfree(smumgr->backend);
smumgr->backend = NULL;
}
@@ -548,7 +568,7 @@ static int vega10_start_smu(struct pp_smumgr *smumgr)
{
PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(smumgr),
"Failed to verify SMC interface!",
- return -1);
+ return -EINVAL);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
index ad05021..821425c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
@@ -30,6 +30,7 @@ enum smu_table_id {
WMTABLE,
AVFSTABLE,
TOOLSTABLE,
+ AVFSFUSETABLE,
MAX_SMU_TABLE,
};
@@ -62,7 +63,6 @@ int vega10_get_smc_features(struct pp_smumgr *smumgr,
uint32_t *features_enabled);
int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table);
int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table);
-int vega10_perform_btc(struct pp_smumgr *smumgr);
int vega10_set_tools_address(struct pp_smumgr *smumgr);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index acd882a..fea96a7 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -236,6 +236,23 @@ static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb
dma_fence_put(f);
}
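+/* Return true when @fence is an unsignaled fence whose ordering is already
+ * guaranteed for @entity (same fence context or produced by the same
+ * scheduler), so it can be optimized away as a dependency. */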
+bool amd_sched_dependency_optimized(struct dma_fence *fence,
+ struct amd_sched_entity *entity)
+{
+ struct amd_gpu_scheduler *sched = entity->sched;
+ struct amd_sched_fence *s_fence;
+
+ if (!fence || dma_fence_is_signaled(fence))
+ return false;
+ if (fence->context == entity->fence_context)
+ return true;
+ s_fence = to_amd_sched_fence(fence);
+ if (s_fence && s_fence->sched == sched)
+ return true;
+
+ return false;
+}
+
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
struct amd_gpu_scheduler *sched = entity->sched;
@@ -387,7 +404,9 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
spin_lock(&sched->job_list_lock);
list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
- if (dma_fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
+ if (s_job->s_fence->parent &&
+ dma_fence_remove_callback(s_job->s_fence->parent,
+ &s_job->s_fence->cb)) {
dma_fence_put(s_job->s_fence->parent);
s_job->s_fence->parent = NULL;
}
@@ -460,9 +479,9 @@ int amd_sched_job_init(struct amd_sched_job *job,
job->sched = sched;
job->s_entity = entity;
job->s_fence = amd_sched_fence_create(entity, owner);
- job->id = atomic64_inc_return(&sched->job_id_count);
if (!job->s_fence)
return -ENOMEM;
+ job->id = atomic64_inc_return(&sched->job_id_count);
INIT_WORK(&job->finish_work, amd_sched_job_finish);
INIT_LIST_HEAD(&job->node);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 0255c7f..924d4a5 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -158,4 +158,6 @@ int amd_sched_job_init(struct amd_sched_job *job,
void *owner);
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
+bool amd_sched_dependency_optimized(struct dma_fence *fence,
+ struct amd_sched_entity *entity);
#endif
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 798a3cc..1a3359c 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -10,6 +10,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@@ -226,16 +227,33 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
static int hdlcd_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
- u32 src_w, src_h;
+ struct drm_rect clip = { 0 };
+ struct drm_crtc_state *crtc_state;
+ u32 src_h = state->src_h >> 16;
- src_w = state->src_w >> 16;
- src_h = state->src_h >> 16;
+ /* only the HDLCD_REG_FB_LINE_COUNT register has a limit */
+ if (src_h >= HDLCD_MAX_YRES) {
+ DRM_DEBUG_KMS("Invalid source width: %d\n", src_h);
+ return -EINVAL;
+ }
+
+ if (!state->fb || !state->crtc)
+ return 0;
- /* we can't do any scaling of the plane source */
- if ((src_w != state->crtc_w) || (src_h != state->crtc_h))
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+ state->crtc);
+ if (!crtc_state) {
+ DRM_DEBUG_KMS("Invalid crtc state\n");
return -EINVAL;
+ }
- return 0;
+ clip.x2 = crtc_state->adjusted_mode.hdisplay;
+ clip.y2 = crtc_state->adjusted_mode.vdisplay;
+
+ return drm_plane_helper_check_state(state, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
}
static void hdlcd_plane_atomic_update(struct drm_plane *plane,
@@ -244,21 +262,20 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
struct drm_framebuffer *fb = plane->state->fb;
struct hdlcd_drm_private *hdlcd;
struct drm_gem_cma_object *gem;
- u32 src_w, src_h, dest_w, dest_h;
+ u32 src_x, src_y, dest_h;
dma_addr_t scanout_start;
if (!fb)
return;
- src_w = plane->state->src_w >> 16;
- src_h = plane->state->src_h >> 16;
- dest_w = plane->state->crtc_w;
- dest_h = plane->state->crtc_h;
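+	/* Use the clipped source rectangle (16.16 fixed point) rather than the
+	 * raw CRTC position when computing the scanout start address. */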
+ src_x = plane->state->src.x1 >> 16;
+ src_y = plane->state->src.y1 >> 16;
+ dest_h = drm_rect_height(&plane->state->dst);
gem = drm_fb_cma_get_gem_obj(fb, 0);
+
scanout_start = gem->paddr + fb->offsets[0] +
- plane->state->crtc_y * fb->pitches[0] +
- plane->state->crtc_x *
- fb->format->cpp[0];
+ src_y * fb->pitches[0] +
+ src_x * fb->format->cpp[0];
hdlcd = plane->dev->dev_private;
hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]);
@@ -305,7 +322,6 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
formats, ARRAY_SIZE(formats),
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
- devm_kfree(drm->dev, plane);
return ERR_PTR(ret);
}
@@ -329,7 +345,6 @@ int hdlcd_setup_crtc(struct drm_device *drm)
&hdlcd_crtc_funcs, NULL);
if (ret) {
hdlcd_plane_destroy(primary);
- devm_kfree(drm->dev, primary);
return ret;
}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 65a3bd7..423dda2 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -152,8 +152,7 @@ static const struct drm_connector_funcs atmel_hlcdc_panel_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
- const struct device_node *np)
+static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
struct atmel_hlcdc_rgb_output *output;
@@ -161,6 +160,11 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
struct drm_bridge *bridge;
int ret;
+ ret = drm_of_find_panel_or_bridge(dev->dev->of_node, 0, endpoint,
+ &panel, &bridge);
+ if (ret)
+ return ret;
+
output = devm_kzalloc(dev->dev, sizeof(*output), GFP_KERNEL);
if (!output)
return -EINVAL;
@@ -177,10 +181,6 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
output->encoder.possible_crtcs = 0x1;
- ret = drm_of_find_panel_or_bridge(np, 0, 0, &panel, &bridge);
- if (ret)
- return ret;
-
if (panel) {
output->connector.dpms = DRM_MODE_DPMS_OFF;
output->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
@@ -220,22 +220,14 @@ err_encoder_cleanup:
int atmel_hlcdc_create_outputs(struct drm_device *dev)
{
- struct device_node *remote;
- int ret = -ENODEV;
- int endpoint = 0;
-
- while (true) {
- /* Loop thru possible multiple connections to the output */
- remote = of_graph_get_remote_node(dev->dev->of_node, 0,
- endpoint++);
- if (!remote)
- break;
-
- ret = atmel_hlcdc_attach_endpoint(dev, remote);
- of_node_put(remote);
- if (ret)
- return ret;
- }
+ int endpoint, ret = 0;
+
+ for (endpoint = 0; !ret; endpoint++)
+ ret = atmel_hlcdc_attach_endpoint(dev, endpoint);
+
+	/* Report success only if at least one endpoint was attached before the
+	 * probe ran off the end of the port list. */
+	if (ret == -ENODEV && endpoint > 1)
+		return 0;
return ret;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig
index 40d2827..53e78d0 100644
--- a/drivers/gpu/drm/bridge/synopsys/Kconfig
+++ b/drivers/gpu/drm/bridge/synopsys/Kconfig
@@ -1,6 +1,7 @@
config DRM_DW_HDMI
tristate
select DRM_KMS_HELPER
+ select REGMAP_MMIO
config DRM_DW_HDMI_AHB_AUDIO
tristate "Synopsys Designware AHB Audio interface"
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 8be9719..aa885a6 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -508,6 +508,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
bool has_connectors =
!!new_crtc_state->connector_mask;
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
crtc->base.id, crtc->name);
@@ -551,6 +553,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
const struct drm_connector_helper_funcs *funcs = connector->helper_private;
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
/*
* This only sets crtc->connectors_changed for routing changes,
* drivers must set crtc->connectors_changed themselves when
@@ -650,6 +654,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
+ WARN_ON(!drm_modeset_is_locked(&plane->mutex));
+
funcs = plane->helper_private;
drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
@@ -2663,7 +2669,12 @@ int drm_atomic_helper_resume(struct drm_device *dev,
drm_modeset_acquire_init(&ctx, 0);
while (1) {
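+		/* Take all modeset locks inside the retry loop so an EDEADLK
+		 * backoff can drop and reacquire them cleanly. */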
+ err = drm_modeset_lock_all_ctx(dev, &ctx);
+ if (err)
+ goto out;
+
err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
+out:
if (err != -EDEADLK)
break;
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 9f84761..48ca245 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1229,21 +1229,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (!connector)
return -ENOENT;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- encoder = drm_connector_get_encoder(connector);
- if (encoder)
- out_resp->encoder_id = encoder->base.id;
- else
- out_resp->encoder_id = 0;
-
- ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
- (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
- (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
- &out_resp->count_props);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
- if (ret)
- goto out_unref;
-
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
if (connector->encoder_ids[i] != 0)
encoders_count++;
@@ -1256,7 +1241,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (put_user(connector->encoder_ids[i],
encoder_ptr + copied)) {
ret = -EFAULT;
- goto out_unref;
+ goto out;
}
copied++;
}
@@ -1300,15 +1285,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (copy_to_user(mode_ptr + copied,
&u_mode, sizeof(u_mode))) {
ret = -EFAULT;
+ mutex_unlock(&dev->mode_config.mutex);
+
goto out;
}
copied++;
}
}
out_resp->count_modes = mode_count;
-out:
mutex_unlock(&dev->mode_config.mutex);
-out_unref:
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ encoder = drm_connector_get_encoder(connector);
+ if (encoder)
+ out_resp->encoder_id = encoder->base.id;
+ else
+ out_resp->encoder_id = 0;
+
+ /* Only grab properties after probing, to make sure EDID and other
+ * properties reflect the latest status. */
+ ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
+ (uint32_t __user *)(unsigned long)(out_resp->props_ptr),
+ (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
+ &out_resp->count_props);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+out:
drm_connector_put(connector);
return ret;
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 3e5f521..213fb83 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1208,3 +1208,86 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux)
return 0;
}
EXPORT_SYMBOL(drm_dp_stop_crc);
+
+struct dpcd_quirk {
+ u8 oui[3];
+ bool is_branch;
+ u32 quirks;
+};
+
+#define OUI(first, second, third) { (first), (second), (third) }
+
+static const struct dpcd_quirk dpcd_quirk_list[] = {
+ /* Analogix 7737 needs reduced M and N at HBR2 link rates */
+ { OUI(0x00, 0x22, 0xb9), true, BIT(DP_DPCD_QUIRK_LIMITED_M_N) },
+};
+
+#undef OUI
+
+/*
+ * Get a bit mask of DPCD quirks for the sink/branch device identified by
+ * ident. The quirk data is shared but it's up to the drivers to act on the
+ * data.
+ *
+ * For now, only the OUI (first three bytes) is used, but this may be extended
+ * to device identification string and hardware/firmware revisions later.
+ */
+static u32
+drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
+{
+ const struct dpcd_quirk *quirk;
+ u32 quirks = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpcd_quirk_list); i++) {
+ quirk = &dpcd_quirk_list[i];
+
+ if (quirk->is_branch != is_branch)
+ continue;
+
+ if (memcmp(quirk->oui, ident->oui, sizeof(ident->oui)) != 0)
+ continue;
+
+ quirks |= quirk->quirks;
+ }
+
+ return quirks;
+}
+
+/**
+ * drm_dp_read_desc - read sink/branch descriptor from DPCD
+ * @aux: DisplayPort AUX channel
+ * @desc: Device descriptor to fill from DPCD
+ * @is_branch: true for branch devices, false for sink devices
+ *
+ * Read DPCD 0x400 (sink) or 0x500 (branch) into @desc. Also debug log the
+ * identification.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
+ bool is_branch)
+{
+ struct drm_dp_dpcd_ident *ident = &desc->ident;
+ unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI;
+ int ret, dev_id_len;
+
+ ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident));
+ if (ret < 0)
+ return ret;
+
+ desc->quirks = drm_dp_get_quirks(ident, is_branch);
+
+ dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id));
+
+ DRM_DEBUG_KMS("DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
+ is_branch ? "branch" : "sink",
+ (int)sizeof(ident->oui), ident->oui,
+ dev_id_len, ident->device_id,
+ ident->hw_rev >> 4, ident->hw_rev & 0xf,
+ ident->sw_major_rev, ident->sw_minor_rev,
+ desc->quirks);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_read_desc);
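
A hedged sketch of how a driver is expected to consume the new helper; drm_dp_has_quirk() is assumed to be the matching header-side test for a DP_DPCD_QUIRK_* bit, as the i915 hunks later in this patch use it:

	static bool example_wants_limited_m_n(struct drm_dp_aux *aux,
					      const u8 dpcd[DP_RECEIVER_CAP_SIZE])
	{
		struct drm_dp_desc desc;

		/* Read DPCD 0x400/0x500 and cache the quirk mask. */
		if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)) < 0)
			return false;

		/* e.g. Analogix 7737 branch devices at HBR2 link rates */
		return drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_LIMITED_M_N);
	}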
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index b5c6bb4..37b8ad3 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -358,7 +358,12 @@ EXPORT_SYMBOL(drm_put_dev);
void drm_unplug_dev(struct drm_device *dev)
{
/* for a USB device */
- drm_dev_unregister(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_modeset_unregister_all(dev);
+
+ drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
+ drm_minor_unregister(dev, DRM_MINOR_RENDER);
+ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
mutex_lock(&drm_global_mutex);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index fad3d44..2e55599 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -80,6 +80,8 @@
#define EDID_QUIRK_FORCE_12BPC (1 << 9)
/* Force 6bpc */
#define EDID_QUIRK_FORCE_6BPC (1 << 10)
+/* Force 10bpc */
+#define EDID_QUIRK_FORCE_10BPC (1 << 11)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -122,6 +124,9 @@ static const struct edid_quirk {
{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
EDID_QUIRK_DETAILED_IN_CM },
+ /* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
+ { "LGD", 764, EDID_QUIRK_FORCE_10BPC },
+
/* LG Philips LCD LP154W01-A5 */
{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
@@ -4244,6 +4249,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
if (quirks & EDID_QUIRK_FORCE_8BPC)
connector->display_info.bpc = 8;
+ if (quirks & EDID_QUIRK_FORCE_10BPC)
+ connector->display_info.bpc = 10;
+
if (quirks & EDID_QUIRK_FORCE_12BPC)
connector->display_info.bpc = 12;
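
For reference, the quirk table keys on the EDID's PNP vendor ID (bytes 8-9, three 5-bit letters packed big-endian, 'A' = 1) and product code (bytes 10-11, little-endian); a minimal decoding sketch, not part of the patch:

	static void example_decode_edid_id(const u8 *edid,
					   char vendor[4], u16 *product)
	{
		u16 mfg = (edid[8] << 8) | edid[9];

		vendor[0] = '@' + ((mfg >> 10) & 0x1f);
		vendor[1] = '@' + ((mfg >> 5) & 0x1f);
		vendor[2] = '@' + (mfg & 0x1f);
		vendor[3] = '\0';

		*product = edid[10] | (edid[11] << 8);
	}

So the new entry matches panels reporting "LGD" with product code 764 (0x02fc).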
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a0ea324..1f178b8 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -2446,7 +2446,7 @@ EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
int __init drm_fb_helper_modinit(void)
{
#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
- const char *name = "fbcon";
+ const char name[] = "fbcon";
struct module *fbcon;
mutex_lock(&module_mutex);
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index fedd4d6..5dc8c43 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -948,8 +948,6 @@ retry:
}
out:
- if (ret && crtc->funcs->page_flip_target)
- drm_crtc_vblank_put(crtc);
if (fb)
drm_framebuffer_put(fb);
if (crtc->primary->old_fb)
@@ -964,5 +962,8 @@ out:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
+ if (ret && crtc->funcs->page_flip_target)
+ drm_crtc_vblank_put(crtc);
+
return ret;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index d019b5e..2d955d7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -161,8 +161,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
file_size += sizeof(*iter.hdr) * n_obj;
/* Allocate the file in vmalloc memory, it's likely to be big */
- iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_HIGHMEM |
- __GFP_NOWARN | __GFP_NORETRY, PAGE_KERNEL);
+ iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+ PAGE_KERNEL);
if (!iter.start) {
dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
return;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 09d3c4c..50294a7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -82,14 +82,9 @@ err_file_priv_free:
return ret;
}
-static void exynos_drm_preclose(struct drm_device *dev,
- struct drm_file *file)
-{
- exynos_drm_subdrv_close(dev, file);
-}
-
static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
+ exynos_drm_subdrv_close(dev, file);
kfree(file->driver_priv);
file->driver_priv = NULL;
}
@@ -145,7 +140,6 @@ static struct drm_driver exynos_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
| DRIVER_ATOMIC | DRIVER_RENDER,
.open = exynos_drm_open,
- .preclose = exynos_drm_preclose,
.lastclose = exynos_drm_lastclose,
.postclose = exynos_drm_postclose,
.gem_free_object_unlocked = exynos_drm_gem_free_object,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index cb31769..39c7405 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -160,12 +160,9 @@ struct exynos_drm_clk {
* drm framework doesn't support multiple irq yet.
* we can refer to the crtc to current hardware interrupt occurred through
* this pipe value.
- * @enabled: if the crtc is enabled or not
- * @event: vblank event that is currently queued for flip
- * @wait_update: wait all pending planes updates to finish
- * @pending_update: number of pending plane updates in this crtc
* @ops: pointer to callbacks for exynos drm specific functionality
* @ctx: A pointer to the crtc's implementation specific context
+ * @pipe_clk: A pointer to the crtc's pipeline clock.
*/
struct exynos_drm_crtc {
struct drm_crtc base;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index fc4fda7..d404de8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1633,7 +1633,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
{
struct device *dev = dsi->dev;
struct device_node *node = dev->of_node;
- struct device_node *ep;
int ret;
ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency",
@@ -1641,32 +1640,21 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
if (ret < 0)
return ret;
- ep = of_graph_get_endpoint_by_regs(node, DSI_PORT_OUT, 0);
- if (!ep) {
- dev_err(dev, "no output port with endpoint specified\n");
- return -EINVAL;
- }
-
- ret = exynos_dsi_of_read_u32(ep, "samsung,burst-clock-frequency",
+ ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency",
&dsi->burst_clk_rate);
if (ret < 0)
- goto end;
+ return ret;
- ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency",
+ ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency",
&dsi->esc_clk_rate);
if (ret < 0)
- goto end;
-
- of_node_put(ep);
+ return ret;
dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0);
if (!dsi->bridge_node)
return -EINVAL;
-end:
- of_node_put(ep);
-
- return ret;
+ return 0;
}
static int exynos_dsi_bind(struct device *dev, struct device *master,
@@ -1817,6 +1805,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
static int exynos_dsi_remove(struct platform_device *pdev)
{
+ struct exynos_dsi *dsi = platform_get_drvdata(pdev);
+
+ of_node_put(dsi->bridge_node);
+
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &exynos_dsi_component_ops);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 5243840..1ff6ab6 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -43,6 +43,8 @@
#include <drm/exynos_drm.h>
+#include <media/cec-notifier.h>
+
#include "exynos_drm_crtc.h"
#define HOTPLUG_DEBOUNCE_MS 1100
@@ -118,6 +120,7 @@ struct hdmi_context {
bool dvi_mode;
struct delayed_work hotplug_work;
struct drm_display_mode current_mode;
+ struct cec_notifier *notifier;
const struct hdmi_driver_data *drv_data;
void __iomem *regs;
@@ -821,6 +824,7 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
if (gpiod_get_value(hdata->hpd_gpio))
return connector_status_connected;
+ cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
return connector_status_disconnected;
}
@@ -859,6 +863,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
edid->width_cm, edid->height_cm);
drm_mode_connector_update_edid_property(connector, edid);
+ cec_notifier_set_phys_addr_from_edid(hdata->notifier, edid);
ret = drm_add_edid_modes(connector, edid);
@@ -1501,6 +1506,7 @@ static void hdmi_disable(struct drm_encoder *encoder)
if (funcs && funcs->disable)
(*funcs->disable)(crtc);
+ cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
cancel_delayed_work(&hdata->hotplug_work);
hdmiphy_disable(hdata);
@@ -1880,15 +1886,22 @@ static int hdmi_probe(struct platform_device *pdev)
}
}
+ hdata->notifier = cec_notifier_get(&pdev->dev);
+ if (hdata->notifier == NULL) {
+ ret = -ENOMEM;
+ goto err_hdmiphy;
+ }
+
pm_runtime_enable(dev);
ret = component_add(&pdev->dev, &hdmi_component_ops);
if (ret)
- goto err_disable_pm_runtime;
+ goto err_notifier_put;
return ret;
-err_disable_pm_runtime:
+err_notifier_put:
+ cec_notifier_put(hdata->notifier);
pm_runtime_disable(dev);
err_hdmiphy:
@@ -1907,9 +1920,11 @@ static int hdmi_remove(struct platform_device *pdev)
struct hdmi_context *hdata = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&hdata->hotplug_work);
+ cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
component_del(&pdev->dev, &hdmi_component_ops);
+ cec_notifier_put(hdata->notifier);
pm_runtime_disable(&pdev->dev);
if (!IS_ERR(hdata->reg_hdmi_en))
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 3f4f424..3949b09 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -21,6 +21,7 @@
#include <drm/drmP.h>
#include <linux/shmem_fs.h>
+#include <asm/set_memory.h>
#include "psb_drv.h"
#include "blitter.h"
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 5ee93ff..1f9b35a 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -35,6 +35,7 @@
#include <linux/pm_runtime.h>
#include <acpi/video.h>
#include <linux/module.h>
+#include <asm/set_memory.h>
static struct drm_driver driver;
static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 0066fe7..be3eefe 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -759,20 +759,23 @@ void psb_intel_lvds_init(struct drm_device *dev,
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
mode_dev->panel_fixed_mode =
drm_mode_duplicate(dev, scan);
+ DRM_DEBUG_KMS("Using mode from DDC\n");
goto out; /* FIXME: check for quirks */
}
}
/* Failed to get EDID, what about VBT? do we need this? */
- if (mode_dev->vbt_mode)
+ if (dev_priv->lfp_lvds_vbt_mode) {
mode_dev->panel_fixed_mode =
- drm_mode_duplicate(dev, mode_dev->vbt_mode);
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- if (!mode_dev->panel_fixed_mode)
- if (dev_priv->lfp_lvds_vbt_mode)
- mode_dev->panel_fixed_mode =
- drm_mode_duplicate(dev,
- dev_priv->lfp_lvds_vbt_mode);
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ DRM_DEBUG_KMS("Using mode from VBT\n");
+ goto out;
+ }
+ }
/*
* If we didn't get EDID, try checking if the panel is already turned
@@ -789,6 +792,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
if (mode_dev->panel_fixed_mode) {
mode_dev->panel_fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
+ DRM_DEBUG_KMS("Using pre-programmed mode\n");
goto out; /* FIXME: check for quirks */
}
}
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 5abc69c..f77dcfa 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -760,7 +760,7 @@ static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
* Get the endpoint node. In our case, dsi has one output port1
* to which the external HDMI bridge is connected.
*/
- ret = drm_of_find_panel_or_bridge(np, 0, 0, NULL, &dsi->bridge);
+ ret = drm_of_find_panel_or_bridge(np, 1, 0, NULL, &dsi->bridge);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index e091809..b00edd3 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -87,3 +87,16 @@ config DRM_I915_LOW_LEVEL_TRACEPOINTS
and also analyze the request dependency resolving timeline.
If in doubt, say "N".
+
+config DRM_I915_DEBUG_VBLANK_EVADE
+ bool "Enable extra debug warnings for vblank evasion"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option to turn on extra debug warnings for the
+ vblank evade mechanism. This gives a warning every time the
+ deadline allotted for the vblank evade critical section
+ is exceeded, even if there isn't an actual risk of missing
+ the vblank.
+
+ If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index dca989e..24fe04d 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -779,8 +779,26 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine;
+ struct intel_vgpu_workload *pos, *n;
+ unsigned int tmp;
+
+ /* free the unsubmitted workloads in the queues. */
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ list_for_each_entry_safe(pos, n,
+ &vgpu->workload_q_head[engine->id], list) {
+ list_del_init(&pos->list);
+ free_workload(pos);
+ }
+ }
+}
+
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
{
+ clean_workloads(vgpu, ALL_ENGINES);
kmem_cache_destroy(vgpu->workloads);
}
@@ -811,17 +829,9 @@ void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
- struct intel_vgpu_workload *pos, *n;
unsigned int tmp;
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
- /* free the unsubmited workload in the queue */
- list_for_each_entry_safe(pos, n,
- &vgpu->workload_q_head[engine->id], list) {
- list_del_init(&pos->list);
- free_workload(pos);
- }
-
+ clean_workloads(vgpu, engine_mask);
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
init_vgpu_execlist(vgpu, engine->id);
- }
}
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 0ad1a50..0ffd696 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1244,7 +1244,7 @@ static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
mode = vgpu_vreg(vgpu, offset);
if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
- WARN_ONCE(1, "VM(%d): iGVT-g doesn't supporte GuC\n",
+ WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
vgpu->id);
return 0;
}
@@ -1366,18 +1366,28 @@ static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- i915_reg_t reg = {.reg = offset};
+ u32 v = *(u32 *)p_data;
+
+ if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
+ return intel_vgpu_default_mmio_write(vgpu,
+ offset, p_data, bytes);
switch (offset) {
case 0x4ddc:
- vgpu_vreg(vgpu, offset) = 0x8000003c;
- /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */
- I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+ /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+ vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
break;
case 0x42080:
- vgpu_vreg(vgpu, offset) = 0x8000;
- /* WaCompressedResourceDisplayNewHashMode:skl */
- I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+ /* bypass WaCompressedResourceDisplayNewHashMode */
+ vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
+ break;
+ case 0xe194:
+ /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+ vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
+ break;
+ case 0x7014:
+ /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+ vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
break;
default:
return -EINVAL;
@@ -1634,7 +1644,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+ skl_misc_ctl_write);
MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2568,7 +2579,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x6e570, D_BDW_PLUS);
MMIO_D(0x65f10, D_BDW_PLUS);
- MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
+ skl_misc_ctl_write);
MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index c6e7972..a5e11d8 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -340,6 +340,9 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
} else
v = mmio->value;
+ if (mmio->in_context)
+ continue;
+
I915_WRITE(mmio->reg, v);
POSTING_READ(mmio->reg);
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 79ba4b34..f25ff13 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -129,9 +129,13 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
struct vgpu_sched_data *vgpu_data;
ktime_t cur_time;
- /* no target to schedule */
- if (!scheduler->next_vgpu)
+ /* no need to schedule if next_vgpu is the same as current_vgpu,
+ * let the scheduler choose next_vgpu again by setting it to NULL.
+ */
+ if (scheduler->next_vgpu == scheduler->current_vgpu) {
+ scheduler->next_vgpu = NULL;
return;
+ }
/*
* after the flag is set, workload dispatch thread will
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3036d48..4842867 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1235,6 +1235,15 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_fini;
pci_set_drvdata(pdev, &dev_priv->drm);
+ /*
+ * Disable the system suspend direct complete optimization, which can
+ * leave the device suspended skipping the driver's suspend handlers
+ * if the device was already runtime suspended. This is needed due to
+ * the difference in our runtime and system suspend sequence and
+ * because the HDA driver may require us to enable the audio power
+ * domain during system suspend.
+ */
+ pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
ret = i915_driver_init_early(dev_priv, ent);
if (ret < 0)
@@ -1272,10 +1281,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_priv->ipc_enabled = false;
- /* Everything is in place, we can now relax! */
- DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
- driver.name, driver.major, driver.minor, driver.patchlevel,
- driver.date, pci_name(pdev), dev_priv->drm.primary->index);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
DRM_INFO("DRM_I915_DEBUG enabled\n");
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c9b0949..2c453a4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -562,7 +562,8 @@ struct intel_link_m_n {
void intel_link_compute_m_n(int bpp, int nlanes,
int pixel_clock, int link_clock,
- struct intel_link_m_n *m_n);
+ struct intel_link_m_n *m_n,
+ bool reduce_m_n);
/* Interface history:
*
@@ -2990,6 +2991,16 @@ static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
return false;
}
+static inline bool
+intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ if (IS_BROXTON(dev_priv) && intel_iommu_gfx_mapped)
+ return true;
+#endif
+ return false;
+}
+
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
int enable_ppgtt);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 532a577..615f0a8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2285,8 +2285,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
unsigned int max_segment;
+ gfp_t noreclaim;
int ret;
- gfp_t gfp;
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2315,22 +2315,31 @@ rebuild_st:
* Fail silently without starting the shrinker
*/
mapping = obj->base.filp->f_mapping;
- gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
- gfp |= __GFP_NORETRY | __GFP_NOWARN;
+ noreclaim = mapping_gfp_constraint(mapping,
+ ~(__GFP_IO | __GFP_RECLAIM));
+ noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
+
sg = st->sgl;
st->nents = 0;
for (i = 0; i < page_count; i++) {
- page = shmem_read_mapping_page_gfp(mapping, i, gfp);
- if (unlikely(IS_ERR(page))) {
- i915_gem_shrink(dev_priv,
- page_count,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_PURGEABLE);
+ const unsigned int shrink[] = {
+ I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
+ 0,
+ }, *s = shrink;
+ gfp_t gfp = noreclaim;
+
+ do {
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
- }
- if (unlikely(IS_ERR(page))) {
- gfp_t reclaim;
+ if (likely(!IS_ERR(page)))
+ break;
+
+ if (!*s) {
+ ret = PTR_ERR(page);
+ goto err_sg;
+ }
+
+ i915_gem_shrink(dev_priv, 2 * page_count, *s++);
+ cond_resched();
/* We've tried hard to allocate the memory by reaping
* our own buffer, now let the real VM do its job and
@@ -2340,15 +2349,26 @@ rebuild_st:
* defer the oom here by reporting the ENOMEM back
* to userspace.
*/
- reclaim = mapping_gfp_mask(mapping);
- reclaim |= __GFP_NORETRY; /* reclaim, but no oom */
-
- page = shmem_read_mapping_page_gfp(mapping, i, reclaim);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto err_sg;
+ if (!*s) {
+ /* reclaim and warn, but no oom */
+ gfp = mapping_gfp_mask(mapping);
+
+ /* Our bo are always dirty and so we require
+ * kswapd to reclaim our pages (direct reclaim
+ * does not effectively begin pageout of our
+ * buffers on its own). However, direct reclaim
+ * only waits for kswapd when under allocation
+ * congestion. As a result, __GFP_RECLAIM is
+ * unreliable and fails to actually reclaim our
+ * dirty pages -- unless you try over and over
+ * again with !__GFP_NORETRY. However, we still
+ * want to fail this allocation rather than
+ * trigger the out-of-memory killer and for
+ * this we want the future __GFP_MAYFAIL.
+ */
}
- }
+ } while (1);
+
if (!i ||
sg->length >= max_segment ||
page_to_pfn(page) != last_pfn + 1) {
@@ -3298,6 +3318,10 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
{
int ret;
+ /* If the device is asleep, we have no requests outstanding */
+ if (!READ_ONCE(i915->gt.awake))
+ return 0;
+
if (flags & I915_WAIT_LOCKED) {
struct i915_gem_timeline *tl;
@@ -4218,6 +4242,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
mapping = obj->base.filp->f_mapping;
mapping_set_gfp_mask(mapping, mask);
+ GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
i915_gem_object_init(obj, &i915_gem_object_ops);
@@ -4789,7 +4814,7 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
- SLAB_DESTROY_BY_RCU);
+ SLAB_TYPESAFE_BY_RCU);
if (!dev_priv->requests)
goto err_vmas;
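
The reworked loop above is the key structural change in this file: a zero-terminated array of shrinker passes is walked, retrying the page allocation after each pass and failing only once the list is spent. A distilled sketch of the pattern with hypothetical names (example_reclaim() and EXAMPLE_PASS_BOUND stand in for i915_gem_shrink() and its flags):

	static struct page *
	example_alloc_with_fallbacks(struct address_space *mapping,
				     pgoff_t index, gfp_t gfp)
	{
		static const unsigned int passes[] = { EXAMPLE_PASS_BOUND, 0 };
		const unsigned int *p = passes;
		struct page *page;

		for (;;) {
			page = shmem_read_mapping_page_gfp(mapping, index, gfp);
			if (!IS_ERR(page))
				return page;

			if (!*p)	/* list exhausted: report failure */
				return page;

			example_reclaim(*p++);	/* hypothetical shrink hook */
			cond_resched();
		}
	}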
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8bab4ae..f1989b8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -31,6 +31,8 @@
#include <linux/seq_file.h>
#include <linux/stop_machine.h>
+#include <asm/set_memory.h>
+
#include <drm/drmP.h>
#include <drm/i915_drm.h>
@@ -193,9 +195,12 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
u32 pte_flags;
int ret;
- ret = vma->vm->allocate_va_range(vma->vm, vma->node.start, vma->size);
- if (ret)
- return ret;
+ if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+ ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
+ vma->size);
+ if (ret)
+ return ret;
+ }
vma->pages = vma->obj->mm.pages;
@@ -2186,6 +2191,101 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
gen8_set_pte(&gtt_base[i], scratch_pte);
}
+static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
+{
+ struct drm_i915_private *dev_priv = vm->i915;
+
+ /*
+ * Make sure the internal GAM fifo has been cleared of all GTT
+ * writes before exiting stop_machine(). This guarantees that
+ * any aperture accesses waiting to start in another process
+ * cannot back up behind the GTT writes causing a hang.
+ * The register can be any arbitrary GAM register.
+ */
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+struct insert_page {
+ struct i915_address_space *vm;
+ dma_addr_t addr;
+ u64 offset;
+ enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
+{
+ struct insert_page *arg = _arg;
+
+ gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 unused)
+{
+ struct insert_page arg = { vm, addr, offset, level };
+
+ stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
+}
+
+struct insert_entries {
+ struct i915_address_space *vm;
+ struct sg_table *st;
+ u64 start;
+ enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
+{
+ struct insert_entries *arg = _arg;
+
+ gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+ struct sg_table *st,
+ u64 start,
+ enum i915_cache_level level,
+ u32 unused)
+{
+ struct insert_entries arg = { vm, st, start, level };
+
+ stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
+}
+
+struct clear_range {
+ struct i915_address_space *vm;
+ u64 start;
+ u64 length;
+};
+
+static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
+{
+ struct clear_range *arg = _arg;
+
+ gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
+ u64 start,
+ u64 length)
+{
+ struct clear_range arg = { vm, start, length };
+
+ stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
+}
+
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
@@ -2304,10 +2404,11 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
if (flags & I915_VMA_LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
- if (appgtt->base.allocate_va_range) {
+ if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
+ appgtt->base.allocate_va_range) {
ret = appgtt->base.allocate_va_range(&appgtt->base,
vma->node.start,
- vma->node.size);
+ vma->size);
if (ret)
goto err_pages;
}
@@ -2779,6 +2880,14 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+ /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
+ if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
+ ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+ ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
+ if (ggtt->base.clear_range != nop_clear_range)
+ ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+ }
+
ggtt->invalidate = gen6_ggtt_invalidate;
return ggtt_probe_common(ggtt, size);
@@ -2991,7 +3100,8 @@ void i915_ggtt_enable_guc(struct drm_i915_private *i915)
void i915_ggtt_disable_guc(struct drm_i915_private *i915)
{
- i915->ggtt.invalidate = gen6_ggtt_invalidate;
+ if (i915->ggtt.invalidate == guc_ggtt_invalidate)
+ i915->ggtt.invalidate = gen6_ggtt_invalidate;
}
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
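
The three __BKL wrappers above share one shape: marshal the arguments into a struct, run the real GGTT update inside stop_machine() so no other CPU can start an aperture access mid-update, and drain the GAM fifo before returning. A generic sketch of that shape (example names only, not part of the patch):

	struct example_args {
		struct i915_address_space *vm;
		u64 payload;
	};

	/* Runs with every other CPU parked in stop_machine(); nothing can
	 * race the update or the posting read that flushes it.
	 */
	static int example_update_cb(void *data)
	{
		struct example_args *args = data;

		example_do_update(args->vm, args->payload);	/* hypothetical */
		bxt_vtd_ggtt_wa(args->vm);

		return 0;
	}

	static void example_update__BKL(struct i915_address_space *vm, u64 payload)
	{
		struct example_args args = { vm, payload };

		stop_machine(example_update_cb, &args, NULL);
	}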
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 5ddbc94..a74d0ac 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -623,7 +623,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* GPU processing the request, we never over-estimate the
* position of the head.
*/
- req->head = req->ring->tail;
+ req->head = req->ring->emit;
/* Check that we didn't interrupt ourselves with a new request */
GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index a211c53..129c58b 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -521,7 +521,7 @@ static inline struct drm_i915_gem_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
/* Performing a lockless retrieval of the active request is super
- * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
+ * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
* slab of request objects will not be freed whilst we hold the
* RCU read lock. It does not guarantee that the request itself
* will not be freed and then *reused*. Viz,
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 129ed30..57d9f7f 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -59,9 +59,6 @@ static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
return;
mutex_unlock(&dev->struct_mutex);
-
- /* expedite the RCU grace period to free some request slabs */
- synchronize_rcu_expedited();
}
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
@@ -274,8 +271,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
I915_SHRINK_ACTIVE);
intel_runtime_pm_put(dev_priv);
- synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
-
return freed;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index a0d6d43..fb5231f 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -278,7 +278,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
obj->mm.quirked = false;
}
if (!i915_gem_object_is_tiled(obj)) {
- GEM_BUG_ON(!obj->mm.quirked);
+ GEM_BUG_ON(obj->mm.quirked);
__i915_gem_object_pin_pages(obj);
obj->mm.quirked = true;
}
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 1642fff..ab5140b 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -480,9 +480,7 @@ static void guc_wq_item_append(struct i915_guc_client *client,
GEM_BUG_ON(freespace < wqi_size);
/* The GuC firmware wants the tail index in QWords, not bytes */
- tail = rq->tail;
- assert_ring_tail_valid(rq->ring, rq->tail);
- tail >>= 3;
+ tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3;
GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index fd97fe0..190f6aa 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2953,7 +2953,6 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
- u32 val;
pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
@@ -2964,18 +2963,16 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
enable_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_LPE_PIPE_A_INTERRUPT |
+ I915_LPE_PIPE_B_INTERRUPT;
+
if (IS_CHERRYVIEW(dev_priv))
- enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+ enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
+ I915_LPE_PIPE_C_INTERRUPT;
WARN_ON(dev_priv->irq_mask != ~0);
- val = (I915_LPE_PIPE_A_INTERRUPT |
- I915_LPE_PIPE_B_INTERRUPT |
- I915_LPE_PIPE_C_INTERRUPT);
-
- enable_mask |= val;
-
dev_priv->irq_mask = ~enable_mask;
GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index f87b0c4..1a78363 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -208,7 +208,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
static const struct intel_device_info intel_ironlake_m_info = {
GEN5_FEATURES,
.platform = INTEL_IRONLAKE,
- .is_mobile = 1,
+ .is_mobile = 1, .has_fbc = 1,
};
#define GEN6_FEATURES \
@@ -390,7 +390,6 @@ static const struct intel_device_info intel_skylake_gt3_info = {
.has_hw_contexts = 1, \
.has_logical_ring_contexts = 1, \
.has_guc = 1, \
- .has_decoupled_mmio = 1, \
.has_aliasing_ppgtt = 1, \
.has_full_ppgtt = 1, \
.has_full_48bit_ppgtt = 1, \
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index c0cb297..2cfe96d3 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -36,10 +36,6 @@
#define VGT_VERSION_MAJOR 1
#define VGT_VERSION_MINOR 0
-#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
-#define INTEL_VGT_IF_VERSION \
- INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
-
/*
* notifications from guest to vgpu device model
*/
@@ -55,8 +51,8 @@ enum vgt_g2v_type {
struct vgt_if {
u64 magic; /* VGT_MAGIC */
- uint16_t version_major;
- uint16_t version_minor;
+ u16 version_major;
+ u16 version_minor;
u32 vgt_id; /* ID of vGT instance */
u32 rsv1[12]; /* pad to offset 0x40 */
/*
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 11b12f4..65b837e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3051,10 +3051,14 @@ enum skl_disp_power_wells {
#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */
#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */
+#define CLKCFG_FSB_1067_ALT (0 << 0) /* hrawclk 266 */
#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */
-/* Note, below two are guess */
-#define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */
-#define CLKCFG_FSB_1600_ALT (0 << 0) /* hrawclk 400 */
+/*
+ * Note that at least on ELK the value below is reported for both the
+ * 333 and 400 MHz BIOS FSB settings, but given that the gmch datasheet
+ * lists only 200/266/333 MHz FSB as supported, let's decode it as 333 MHz.
+ */
+#define CLKCFG_FSB_1333_ALT (4 << 0) /* hrawclk 333 */
#define CLKCFG_FSB_MASK (7 << 0)
#define CLKCFG_MEM_533 (1 << 4)
#define CLKCFG_MEM_667 (2 << 4)
@@ -8276,7 +8280,7 @@ enum {
/* MIPI DSI registers */
-#define _MIPI_PORT(port, a, c) ((port) ? c : a) /* ports A and C only */
+#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */
#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
#define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004)
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 4ab8a97..2e73901 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -60,8 +60,8 @@
*/
void i915_check_vgpu(struct drm_i915_private *dev_priv)
{
- uint64_t magic;
- uint32_t version;
+ u64 magic;
+ u16 version_major;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
@@ -69,10 +69,8 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
if (magic != VGT_MAGIC)
return;
- version = INTEL_VGT_IF_VERSION_ENCODE(
- __raw_i915_read16(dev_priv, vgtif_reg(version_major)),
- __raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
- if (version != INTEL_VGT_IF_VERSION) {
+ version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major));
+ if (version_major < VGT_VERSION_MAJOR) {
DRM_INFO("VGT interface version mismatch!\n");
return;
}
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index dd3ad52..f29a226 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -1798,13 +1798,11 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
case CLKCFG_FSB_800:
return 200000;
case CLKCFG_FSB_1067:
+ case CLKCFG_FSB_1067_ALT:
return 266667;
case CLKCFG_FSB_1333:
+ case CLKCFG_FSB_1333_ALT:
return 333333;
- /* these two are just a guess; one of them might be right */
- case CLKCFG_FSB_1600:
- case CLKCFG_FSB_1600_ALT:
- return 400000;
default:
return 133333;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3617927..9106ea3 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -120,7 +120,8 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
-static void intel_modeset_setup_hw_state(struct drm_device *dev);
+static void intel_modeset_setup_hw_state(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
struct intel_limit {
@@ -3449,7 +3450,7 @@ __intel_display_resume(struct drm_device *dev,
struct drm_crtc *crtc;
int i, ret;
- intel_modeset_setup_hw_state(dev);
+ intel_modeset_setup_hw_state(dev, ctx);
i915_redisable_vga(to_i915(dev));
if (!state)
@@ -4598,7 +4599,7 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
- unsigned scaler_user, int *scaler_id, unsigned int rotation,
+ unsigned int scaler_user, int *scaler_id,
int src_w, int src_h, int dst_w, int dst_h)
{
struct intel_crtc_scaler_state *scaler_state =
@@ -4607,9 +4608,12 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
to_intel_crtc(crtc_state->base.crtc);
int need_scaling;
- need_scaling = drm_rotation_90_or_270(rotation) ?
- (src_h != dst_w || src_w != dst_h):
- (src_w != dst_w || src_h != dst_h);
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
+ need_scaling = src_w != dst_w || src_h != dst_h;
/*
* if plane is being disabled or scaler is no more required or force detach
@@ -4671,7 +4675,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
- &state->scaler_state.scaler_id, DRM_ROTATE_0,
+ &state->scaler_state.scaler_id,
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
@@ -4700,7 +4704,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base),
&plane_state->scaler_id,
- plane_state->base.rotation,
drm_rect_width(&plane_state->base.src) >> 16,
drm_rect_height(&plane_state->base.src) >> 16,
drm_rect_width(&plane_state->base.dst),
@@ -5823,7 +5826,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_update_watermarks(intel_crtc);
}
-static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
+static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5853,7 +5857,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
return;
}
- state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
+ state->acquire_ctx = ctx;
/* Everything's already locked, -EDEADLK can't happen. */
crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
@@ -6101,7 +6105,7 @@ retry:
pipe_config->fdi_lanes = lane;
intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
- link_bw, &pipe_config->fdi_m_n);
+ link_bw, &pipe_config->fdi_m_n, false);
ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
@@ -6277,7 +6281,8 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
}
static void compute_m_n(unsigned int m, unsigned int n,
- uint32_t *ret_m, uint32_t *ret_n)
+ uint32_t *ret_m, uint32_t *ret_n,
+ bool reduce_m_n)
{
/*
* Reduce M/N as much as possible without loss in precision. Several DP
@@ -6285,9 +6290,11 @@ static void compute_m_n(unsigned int m, unsigned int n,
* values. The passed in values are more likely to have the least
* significant bits zero than M after rounding below, so do this first.
*/
- while ((m & 1) == 0 && (n & 1) == 0) {
- m >>= 1;
- n >>= 1;
+ if (reduce_m_n) {
+ while ((m & 1) == 0 && (n & 1) == 0) {
+ m >>= 1;
+ n >>= 1;
+ }
}
*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
@@ -6298,16 +6305,19 @@ static void compute_m_n(unsigned int m, unsigned int n,
void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
int pixel_clock, int link_clock,
- struct intel_link_m_n *m_n)
+ struct intel_link_m_n *m_n,
+ bool reduce_m_n)
{
m_n->tu = 64;
compute_m_n(bits_per_pixel * pixel_clock,
link_clock * nlanes * 8,
- &m_n->gmch_m, &m_n->gmch_n);
+ &m_n->gmch_m, &m_n->gmch_n,
+ reduce_m_n);
compute_m_n(pixel_clock, link_clock,
- &m_n->link_m, &m_n->link_n);
+ &m_n->link_m, &m_n->link_n,
+ reduce_m_n);
}
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
@@ -12197,6 +12207,15 @@ static void update_scanline_offset(struct intel_crtc *crtc)
* type. For DP ports it behaves like most other platforms, but on HDMI
* there's an extra 1 line difference. So we need to add two instead of
* one to the value.
+ *
+ * On VLV/CHV DSI the scanline counter would appear to increment
+ * approx. 1/3 of a scanline before start of vblank. Unfortunately
+ * that means we can't tell whether we're in vblank or not while
+ * we're on that particular line. We must still set scanline_offset
+ * to 1 so that the vblank timestamps come out correct when we query
+ * the scanline counter from within the vblank interrupt handler.
+ * However if queried just before the start of vblank we'll get an
+ * answer that's slightly in the future.
*/
if (IS_GEN2(dev_priv)) {
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
@@ -15013,7 +15032,7 @@ int intel_modeset_init(struct drm_device *dev)
intel_setup_outputs(dev_priv);
drm_modeset_lock_all(dev);
- intel_modeset_setup_hw_state(dev);
+ intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
drm_modeset_unlock_all(dev);
for_each_intel_crtc(dev, crtc) {
@@ -15050,13 +15069,13 @@ int intel_modeset_init(struct drm_device *dev)
return 0;
}
-static void intel_enable_pipe_a(struct drm_device *dev)
+static void intel_enable_pipe_a(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
struct drm_connector *crt = NULL;
struct intel_load_detect_pipe load_detect_temp;
- struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
int ret;
/* We can't just switch on the pipe A, we need to set things up with a
@@ -15128,7 +15147,8 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
}
-static void intel_sanitize_crtc(struct intel_crtc *crtc)
+static void intel_sanitize_crtc(struct intel_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -15174,7 +15194,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
plane = crtc->plane;
crtc->base.primary->state->visible = true;
crtc->plane = !plane;
- intel_crtc_disable_noatomic(&crtc->base);
+ intel_crtc_disable_noatomic(&crtc->base, ctx);
crtc->plane = plane;
}
@@ -15184,13 +15204,13 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* resume. Force-enable the pipe to fix this, the update_dpms
* call below we restore the pipe to the right state, but leave
* the required bits on. */
- intel_enable_pipe_a(dev);
+ intel_enable_pipe_a(dev, ctx);
}
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
if (crtc->active && !intel_crtc_has_encoders(crtc))
- intel_crtc_disable_noatomic(&crtc->base);
+ intel_crtc_disable_noatomic(&crtc->base, ctx);
if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
/*
@@ -15488,7 +15508,8 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
* and sanitizes it to the current state
*/
static void
-intel_modeset_setup_hw_state(struct drm_device *dev)
+intel_modeset_setup_hw_state(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
@@ -15508,7 +15529,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
for_each_pipe(dev_priv, pipe) {
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- intel_sanitize_crtc(crtc);
+ intel_sanitize_crtc(crtc, ctx);
intel_dump_pipe_config(crtc, crtc->config,
"[setup_hw_state]");
}
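
The reduce_m_n plumbing threaded through the hunks above bottoms out in compute_m_n(). A hedged sketch of the full helper as it reads after this patch; the tail is reconstructed from the surrounding context, so treat it as illustrative:

	static void example_compute_m_n(unsigned int m, unsigned int n,
					u32 *ret_m, u32 *ret_n, bool reduce_m_n)
	{
		/* Strip common factors of two only when the sink's quirk
		 * asks for small M/N values.
		 */
		if (reduce_m_n) {
			while ((m & 1) == 0 && (n & 1) == 0) {
				m >>= 1;
				n >>= 1;
			}
		}

		/* Round N up to a power of two and rescale M to match;
		 * intel_reduce_m_n_ratio() then trims both into range.
		 */
		*ret_n = min_t(unsigned int, roundup_pow_of_two(n),
			       DATA_LINK_N_MAX);
		*ret_m = div_u64((u64)m * *ret_n, n);
	}

For a 24 bpp, 4-lane link, m starts as bits_per_pixel * pixel_clock and n as link_clock * nlanes * 8, so skipping the halving loop leaves much larger raw values for N to be clamped from.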
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index ee77b51..fc691b8 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1507,37 +1507,6 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("common rates: %s\n", str);
}
-bool
-__intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc)
-{
- u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI :
- DP_SINK_OUI;
-
- return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) ==
- sizeof(*desc);
-}
-
-bool intel_dp_read_desc(struct intel_dp *intel_dp)
-{
- struct intel_dp_desc *desc = &intel_dp->desc;
- bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] &
- DP_OUI_SUPPORT;
- int dev_id_len;
-
- if (!__intel_dp_read_desc(intel_dp, desc))
- return false;
-
- dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id));
- DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n",
- drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink",
- (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)",
- dev_id_len, desc->device_id,
- desc->hw_rev >> 4, desc->hw_rev & 0xf,
- desc->sw_major_rev, desc->sw_minor_rev);
-
- return true;
-}
-
static int rate_to_index(int find, const int *rates)
{
int i = 0;
@@ -1624,6 +1593,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
int common_rates[DP_MAX_SUPPORTED_RATES] = {};
int common_len;
uint8_t link_bw, rate_select;
+ bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
+ DP_DPCD_QUIRK_LIMITED_M_N);
common_len = intel_dp_common_rates(intel_dp, common_rates);
@@ -1753,7 +1724,8 @@ found:
intel_link_compute_m_n(bpp, lane_count,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
- &pipe_config->dp_m_n);
+ &pipe_config->dp_m_n,
+ reduce_m_n);
if (intel_connector->panel.downclock_mode != NULL &&
dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -1761,7 +1733,8 @@ found:
intel_link_compute_m_n(bpp, lane_count,
intel_connector->panel.downclock_mode->clock,
pipe_config->port_clock,
- &pipe_config->dp_m2_n2);
+ &pipe_config->dp_m2_n2,
+ reduce_m_n);
}
/*
@@ -3622,7 +3595,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_read_dpcd(intel_dp))
return false;
- intel_dp_read_desc(intel_dp);
+ drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
+ drm_dp_is_branch(intel_dp->dpcd));
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
@@ -4624,7 +4598,8 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_dp_print_rates(intel_dp);
- intel_dp_read_desc(intel_dp);
+ drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
+ drm_dp_is_branch(intel_dp->dpcd));
intel_dp_configure_mst(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
index 6532e22..40ba313 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -119,8 +119,6 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
struct intel_panel *panel = &connector->panel;
- intel_dp_aux_enable_backlight(connector);
-
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
panel->backlight.max = 0xFFFF;
else
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index c1f62eb..989e255 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -44,6 +44,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
int lane_count, slots;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int mst_pbn;
+ bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
+ DP_DPCD_QUIRK_LIMITED_M_N);
pipe_config->has_pch_encoder = false;
bpp = 24;
@@ -75,7 +77,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
intel_link_compute_m_n(bpp, lane_count,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
- &pipe_config->dp_m_n);
+ &pipe_config->dp_m_n,
+ reduce_m_n);
pipe_config->dp_m_n.tu = slots;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index aaee394..f630c7a 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -906,14 +906,6 @@ enum link_m_n_set {
M2_N2
};
-struct intel_dp_desc {
- u8 oui[3];
- u8 device_id[6];
- u8 hw_rev;
- u8 sw_major_rev;
- u8 sw_minor_rev;
-} __packed;
-
struct intel_dp_compliance_data {
unsigned long edid;
uint8_t video_pattern;
@@ -957,7 +949,7 @@ struct intel_dp {
/* Max link BW for the sink as per DPCD registers */
int max_sink_link_bw;
/* sink or branch descriptor */
- struct intel_dp_desc desc;
+ struct drm_dp_desc desc;
struct drm_dp_aux aux;
enum intel_display_power_domain aux_power_domain;
uint8_t train_set[4];
@@ -1532,9 +1524,6 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
}
bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
-bool __intel_dp_read_desc(struct intel_dp *intel_dp,
- struct intel_dp_desc *desc);
-bool intel_dp_read_desc(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 3ffe8b1..fc0ef49 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -410,11 +410,10 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
val |= (ULPS_STATE_ENTER | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
- /* Wait for ULPS Not active */
+ /* Wait for ULPS active */
if (intel_wait_for_register(dev_priv,
- MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE,
- GLK_ULPS_NOT_ACTIVE, 20))
- DRM_ERROR("ULPS is still active\n");
+ MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 0, 20))
+ DRM_ERROR("ULPS not active\n");
/* Exit ULPS */
val = I915_READ(MIPI_DEVICE_READY(port));
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 854e8e0..f94eacf 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1075,6 +1075,22 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
return 0;
}
+static bool ring_is_idle(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ bool idle = true;
+
+ intel_runtime_pm_get(dev_priv);
+
+ /* No bit for gen2, so assume the CS parser is idle */
+ if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+ idle = false;
+
+ intel_runtime_pm_put(dev_priv);
+
+ return idle;
+}
+
/**
* intel_engine_is_idle() - Report if the engine has finished process all work
* @engine: the intel_engine_cs
@@ -1084,8 +1100,6 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
*/
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
/* Any inflight/incomplete requests? */
if (!i915_seqno_passed(intel_engine_get_seqno(engine),
intel_engine_last_submit(engine)))
@@ -1100,7 +1114,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return false;
/* Ring stopped? */
- if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+ if (!ring_is_idle(engine))
return false;
return true;
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index ded2add..d93c584 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -82,20 +82,10 @@ static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
int *width, int *height)
{
- int w, h;
-
- if (drm_rotation_90_or_270(cache->plane.rotation)) {
- w = cache->plane.src_h;
- h = cache->plane.src_w;
- } else {
- w = cache->plane.src_w;
- h = cache->plane.src_h;
- }
-
if (width)
- *width = w;
+ *width = cache->plane.src_w;
if (height)
- *height = h;
+ *height = cache->plane.src_h;
}
static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
@@ -746,6 +736,11 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
cache->plane.rotation = plane_state->base.rotation;
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
cache->plane.visible = plane_state->base.visible;
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 25d8e76..292fedf 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -63,6 +63,7 @@
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include "i915_drv.h"
#include <linux/delay.h>
@@ -121,6 +122,10 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
kfree(rsc);
+ pm_runtime_forbid(&platdev->dev);
+ pm_runtime_set_active(&platdev->dev);
+ pm_runtime_enable(&platdev->dev);
+
return platdev;
err:
@@ -144,44 +149,10 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
static void lpe_audio_irq_unmask(struct irq_data *d)
{
- struct drm_i915_private *dev_priv = d->chip_data;
- unsigned long irqflags;
- u32 val = (I915_LPE_PIPE_A_INTERRUPT |
- I915_LPE_PIPE_B_INTERRUPT);
-
- if (IS_CHERRYVIEW(dev_priv))
- val |= I915_LPE_PIPE_C_INTERRUPT;
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
- dev_priv->irq_mask &= ~val;
- I915_WRITE(VLV_IIR, val);
- I915_WRITE(VLV_IIR, val);
- I915_WRITE(VLV_IMR, dev_priv->irq_mask);
- POSTING_READ(VLV_IMR);
-
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void lpe_audio_irq_mask(struct irq_data *d)
{
- struct drm_i915_private *dev_priv = d->chip_data;
- unsigned long irqflags;
- u32 val = (I915_LPE_PIPE_A_INTERRUPT |
- I915_LPE_PIPE_B_INTERRUPT);
-
- if (IS_CHERRYVIEW(dev_priv))
- val |= I915_LPE_PIPE_C_INTERRUPT;
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
- dev_priv->irq_mask |= val;
- I915_WRITE(VLV_IMR, dev_priv->irq_mask);
- I915_WRITE(VLV_IIR, val);
- I915_WRITE(VLV_IIR, val);
- POSTING_READ(VLV_IIR);
-
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static struct irq_chip lpe_audio_irqchip = {
@@ -325,8 +296,6 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
desc = irq_to_desc(dev_priv->lpe_audio.irq);
- lpe_audio_irq_mask(&desc->irq_data);
-
lpe_audio_platdev_destroy(dev_priv);
irq_free_desc(dev_priv->lpe_audio.irq);
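
With the interrupt masking gone, the audio driver's runtime-PM state carries
the power handshake; at creation the child platform device is primed so it
starts out active and cannot runtime-suspend until userspace allows it. A
sketch of that three-call sequence, assuming the parent has already powered
the block:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static void child_pm_init(struct platform_device *child)
{
	/* Block runtime suspend until userspace explicitly allows it. */
	pm_runtime_forbid(&child->dev);
	/* The parent powered the block, so report the device as active. */
	pm_runtime_set_active(&child->dev);
	pm_runtime_enable(&child->dev);
}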
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c8f7c63..62f44d3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -326,8 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
u32 *reg_state = ce->lrc_reg_state;
- assert_ring_tail_valid(rq->ring, rq->tail);
- reg_state[CTX_RING_TAIL+1] = rq->tail;
+ reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
/* True 32b PPGTT with dynamic page allocation: update PDP
* registers and point the unallocated PDPs to scratch page.
@@ -1989,7 +1988,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
ce->ring = ring;
ce->state = vma;
- ce->initialised = engine->init_context == NULL;
+ ce->initialised |= engine->init_context == NULL;
return 0;
@@ -2036,8 +2035,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
ce->state->obj->mm.dirty = true;
i915_gem_object_unpin_map(ce->state->obj);
- ce->ring->head = ce->ring->tail = 0;
- intel_ring_update_space(ce->ring);
+ intel_ring_reset(ce->ring, 0);
}
}
}
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 71cbe9c..5abef482 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -240,7 +240,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
return false;
}
- intel_dp_read_desc(dp);
+ drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd));
DRM_DEBUG_KMS("Success: LSPCON init\n");
return true;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 570bd60..078fd1b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3373,20 +3373,26 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
/* n.b., src is 16.16 fixed point, dst is whole integer */
if (plane->id == PLANE_CURSOR) {
+ /*
+ * Cursors only support 0/180 degree rotation,
+ * hence no need to account for rotation here.
+ */
src_w = pstate->base.src_w;
src_h = pstate->base.src_h;
dst_w = pstate->base.crtc_w;
dst_h = pstate->base.crtc_h;
} else {
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
src_w = drm_rect_width(&pstate->base.src);
src_h = drm_rect_height(&pstate->base.src);
dst_w = drm_rect_width(&pstate->base.dst);
dst_h = drm_rect_height(&pstate->base.dst);
}
- if (drm_rotation_90_or_270(pstate->base.rotation))
- swap(dst_w, dst_h);
-
downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
@@ -3417,12 +3423,14 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
if (y && format != DRM_FORMAT_NV12)
return 0;
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
- if (drm_rotation_90_or_270(pstate->rotation))
- swap(width, height);
-
/* for planar format */
if (format == DRM_FORMAT_NV12) {
if (y) /* y-plane data rate */
@@ -3505,12 +3513,14 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
fb->modifier != I915_FORMAT_MOD_Yf_TILED)
return 8;
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
- if (drm_rotation_90_or_270(pstate->rotation))
- swap(src_w, src_h);
-
/* Halve UV plane width and height for NV12 */
if (fb->format->format == DRM_FORMAT_NV12 && !y) {
src_w /= 2;
@@ -3794,13 +3804,15 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
width = intel_pstate->base.crtc_w;
height = intel_pstate->base.crtc_h;
} else {
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
}
- if (drm_rotation_90_or_270(pstate->rotation))
- swap(width, height);
-
cpp = fb->format->cpp[0];
plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
@@ -4335,11 +4347,19 @@ skl_compute_wm(struct drm_atomic_state *state)
struct drm_crtc_state *cstate;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct skl_wm_values *results = &intel_state->wm_results;
+ struct drm_device *dev = state->dev;
struct skl_pipe_wm *pipe_wm;
bool changed = false;
int ret, i;
/*
+ * When we distrust bios wm we always need to recompute to set the
+ * expected DDB allocations for each CRTC.
+ */
+ if (to_i915(dev)->wm.distrust_bios_wm)
+ changed = true;
+
+ /*
* If this transaction isn't actually touching any CRTC's, don't
* bother with watermark calculation. Note that if we pass this
* test, we're guaranteed to hold at least one CRTC state mutex,
@@ -4349,6 +4369,7 @@ skl_compute_wm(struct drm_atomic_state *state)
*/
for_each_new_crtc_in_state(state, crtc, cstate, i)
changed = true;
+
if (!changed)
return 0;
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index c3780d0..559f1ab 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -435,8 +435,9 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
}
/* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
- if (intel_crtc->config->pipe_src_w > 3200 ||
- intel_crtc->config->pipe_src_h > 2000) {
+ if (dev_priv->psr.psr2_support &&
+ (intel_crtc->config->pipe_src_w > 3200 ||
+ intel_crtc->config->pipe_src_h > 2000)) {
dev_priv->psr.psr2_support = false;
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 66a2b8b..513a0f4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -49,7 +49,7 @@ static int __intel_ring_space(int head, int tail, int size)
void intel_ring_update_space(struct intel_ring *ring)
{
- ring->space = __intel_ring_space(ring->head, ring->tail, ring->size);
+ ring->space = __intel_ring_space(ring->head, ring->emit, ring->size);
}
static int
@@ -774,8 +774,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
i915_gem_request_submit(request);
- assert_ring_tail_valid(request->ring, request->tail);
- I915_WRITE_TAIL(request->engine, request->tail);
+ I915_WRITE_TAIL(request->engine,
+ intel_ring_set_tail(request->ring, request->tail));
}
static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
@@ -1316,11 +1316,23 @@ err:
return PTR_ERR(addr);
}
+void intel_ring_reset(struct intel_ring *ring, u32 tail)
+{
+ GEM_BUG_ON(!list_empty(&ring->request_list));
+ ring->tail = tail;
+ ring->head = tail;
+ ring->emit = tail;
+ intel_ring_update_space(ring);
+}
+
void intel_ring_unpin(struct intel_ring *ring)
{
GEM_BUG_ON(!ring->vma);
GEM_BUG_ON(!ring->vaddr);
+ /* Discard any unused bytes beyond those submitted to hw. */
+ intel_ring_reset(ring, ring->tail);
+
if (i915_vma_is_map_and_fenceable(ring->vma))
i915_vma_unpin_iomap(ring->vma);
else
@@ -1562,8 +1574,9 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ /* Restart from the beginning of the rings for convenience */
for_each_engine(engine, dev_priv, id)
- engine->buffer->head = engine->buffer->tail;
+ intel_ring_reset(engine->buffer, 0);
}
static int ring_request_alloc(struct drm_i915_gem_request *request)
@@ -1616,7 +1629,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
unsigned space;
/* Would completion of this request free enough space? */
- space = __intel_ring_space(target->postfix, ring->tail,
+ space = __intel_ring_space(target->postfix, ring->emit,
ring->size);
if (space >= bytes)
break;
@@ -1641,8 +1654,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
struct intel_ring *ring = req->ring;
- int remain_actual = ring->size - ring->tail;
- int remain_usable = ring->effective_size - ring->tail;
+ int remain_actual = ring->size - ring->emit;
+ int remain_usable = ring->effective_size - ring->emit;
int bytes = num_dwords * sizeof(u32);
int total_bytes, wait_bytes;
bool need_wrap = false;
@@ -1678,17 +1691,17 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
if (unlikely(need_wrap)) {
GEM_BUG_ON(remain_actual > ring->space);
- GEM_BUG_ON(ring->tail + remain_actual > ring->size);
+ GEM_BUG_ON(ring->emit + remain_actual > ring->size);
/* Fill the tail with MI_NOOP */
- memset(ring->vaddr + ring->tail, 0, remain_actual);
- ring->tail = 0;
+ memset(ring->vaddr + ring->emit, 0, remain_actual);
+ ring->emit = 0;
ring->space -= remain_actual;
}
- GEM_BUG_ON(ring->tail > ring->size - bytes);
- cs = ring->vaddr + ring->tail;
- ring->tail += bytes;
+ GEM_BUG_ON(ring->emit > ring->size - bytes);
+ cs = ring->vaddr + ring->emit;
+ ring->emit += bytes;
ring->space -= bytes;
GEM_BUG_ON(ring->space < 0);
@@ -1699,7 +1712,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
int num_dwords =
- (req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+ (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
u32 *cs;
if (num_dwords == 0)
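
This file carries the core of the tail/emit split: ring->emit tracks where
the CPU writes commands, while ring->tail only advances via
intel_ring_set_tail() when a request is actually submitted, so aborted writes
are no longer counted as submitted work. A compressed sketch of the
bookkeeping; the reserve constant in the space calculation is an assumption,
in the spirit of __intel_ring_space():

#include <linux/types.h>

struct ring {
	u32 head;	/* last offset consumed by the GPU */
	u32 tail;	/* last offset submitted to the GPU */
	u32 emit;	/* next offset the CPU will write */
	int size;
	int space;
};

static int ring_space(int head, int tail, int size)
{
	int space = head - tail;

	if (space <= 0)
		space += size;
	return space - 32;	/* assumed reserve so full != empty */
}

static void ring_update_space(struct ring *ring)
{
	/* Bytes between tail and emit already belong to the CPU, so
	 * free space is measured up to emit, not tail. */
	ring->space = ring_space(ring->head, ring->emit, ring->size);
}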
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a82a080..f7144fe 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -145,6 +145,7 @@ struct intel_ring {
u32 head;
u32 tail;
+ u32 emit;
int space;
int size;
@@ -488,6 +489,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
+void intel_ring_reset(struct intel_ring *ring, u32 tail);
+void intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);
@@ -511,7 +514,7 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
* reserved for the command packet (i.e. the value passed to
* intel_ring_begin()).
*/
- GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
+ GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
}
static inline u32
@@ -540,7 +543,19 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
GEM_BUG_ON(tail >= ring->size);
}
-void intel_ring_update_space(struct intel_ring *ring);
+static inline unsigned int
+intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
+{
+ /* Whilst writes to the tail are strictly ordered, there is no
+ * serialisation between readers and the writers. The tail may be
+ * read by i915_gem_request_retire() just as it is being updated
+ * by execlists, as although the breadcrumb is complete, the context
+ * switch hasn't been seen.
+ */
+ assert_ring_tail_valid(ring, tail);
+ ring->tail = tail;
+ return tail;
+}
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index f7d4314..e6517ed 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -83,10 +83,13 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
*/
void intel_pipe_update_start(struct intel_crtc *crtc)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+ bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI);
DEFINE_WAIT(wait);
vblank_start = adjusted_mode->crtc_vblank_start;
@@ -139,6 +142,24 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
drm_crtc_vblank_put(&crtc->base);
+ /*
+ * On VLV/CHV DSI the scanline counter would appear to
+ * increment approx. 1/3 of a scanline before start of vblank.
+ * The registers still get latched at start of vblank however.
+ * This means we must not write any registers on the first
+ * line of vblank (since not the whole line is actually in
+ * vblank). And unfortunately we can't use the interrupt to
+ * wait here since it will fire too soon. We could use the
+ * frame start interrupt instead since it will fire after the
+ * critical scanline, but that would require more changes
+ * in the interrupt code. So for now we'll just do the nasty
+ * thing and poll for the bad scanline to pass us by.
+ *
+ * FIXME figure out if BXT+ DSI suffers from this as well
+ */
+ while (need_vlv_dsi_wa && scanline == vblank_start)
+ scanline = intel_get_crtc_scanline(crtc);
+
crtc->debug.scanline_start = scanline;
crtc->debug.start_vbl_time = ktime_get();
crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
@@ -198,12 +219,15 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work
ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
crtc->debug.min_vbl, crtc->debug.max_vbl,
crtc->debug.scanline_start, scanline_end);
- } else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) >
- VBLANK_EVASION_TIME_US)
+ }
+#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
+ else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) >
+ VBLANK_EVASION_TIME_US)
DRM_WARN("Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
pipe_name(pipe),
ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
VBLANK_EVASION_TIME_US);
+#endif
}
static void
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 4b7f73a..f841152 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -59,8 +59,6 @@ struct drm_i915_gem_request;
* available in the work queue (note, the queue is shared,
* not per-engine). It is OK for this to be nonzero, but
* it should not be huge!
- * q_fail: failed to enqueue a work item. This should never happen,
- * because we check for space beforehand.
* b_fail: failed to ring the doorbell. This should never happen, unless
* somehow the hardware misbehaves, or maybe if the GuC firmware
* crashes? We probably need to reset the GPU to recover.
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 1afb8b06..12b85b3 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -320,7 +320,7 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
static int igt_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj = NULL;
struct drm_file *file;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
@@ -359,7 +359,7 @@ static int igt_ctx_exec(void *arg)
}
for_each_engine(engine, i915, id) {
- if (dw == 0) {
+ if (!obj) {
obj = create_test_object(ctx, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
@@ -376,8 +376,10 @@ static int igt_ctx_exec(void *arg)
goto out_unlock;
}
- if (++dw == max_dwords(obj))
+ if (++dw == max_dwords(obj)) {
+ obj = NULL;
dw = 0;
+ }
ndwords++;
}
ncontexts++;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 6a8258e..9f24c5d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -174,7 +174,7 @@ struct drm_i915_private *mock_gem_device(void)
i915->requests = KMEM_CACHE(mock_request,
SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
- SLAB_DESTROY_BY_RCU);
+ SLAB_TYPESAFE_BY_RCU);
if (!i915->requests)
goto err_vmas;
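
SLAB_DESTROY_BY_RCU was renamed SLAB_TYPESAFE_BY_RCU this cycle with
unchanged semantics: a freed object may be reused immediately for the same
type, but its pages outlive an RCU grace period, so lockless readers may
still dereference a stale pointer as long as they revalidate it. A sketch of
creating such a cache; struct mock_request here is a one-field placeholder:

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>

struct mock_request {
	u64 seqno;	/* placeholder payload */
};

static struct kmem_cache *request_cache;

static int requests_init(void)
{
	request_cache = KMEM_CACHE(mock_request,
				   SLAB_HWCACHE_ALIGN |
				   SLAB_RECLAIM_ACCOUNT |
				   SLAB_TYPESAFE_BY_RCU);
	return request_cache ? 0 : -ENOMEM;
}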
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 8fb801f..8b05ecb 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -673,7 +673,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
ret = drm_of_find_panel_or_bridge(child,
imx_ldb->lvds_mux ? 4 : 2, 0,
&channel->panel, &channel->bridge);
- if (ret)
+ if (ret && ret != -ENODEV)
return ret;
/* panel ddc only if there is no bridge */
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 808b995..b5cc6e1 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -19,6 +19,7 @@
#include <drm/drm_of.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -900,16 +901,12 @@ static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
{
- u32 timeout_ms = 500000; /* total 1s ~ 2s timeout */
-
- while (timeout_ms--) {
- if (!(readl(dsi->regs + DSI_INTSTA) & DSI_BUSY))
- break;
-
- usleep_range(2, 4);
- }
+ int ret;
+ u32 val;
- if (timeout_ms == 0) {
+ ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY),
+ 4, 2000000);
+ if (ret) {
DRM_WARN("polling dsi wait not busy timeout!\n");
mtk_dsi_enable(dsi);
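
readl_poll_timeout() from <linux/iopoll.h> rereads a register into `val`
until the condition holds or the timeout (in microseconds) expires, returning
0 or -ETIMEDOUT, which makes the bound explicit where the old counter loop
only approximated it. A sketch with a hypothetical status register and busy
bit standing in for DSI_INTSTA/DSI_BUSY:

#include <linux/iopoll.h>
#include <linux/bits.h>

#define STATUS_REG	0x0c		/* hypothetical offset */
#define STATUS_BUSY	BIT(31)		/* hypothetical busy bit */

static int wait_for_idle(void __iomem *regs)
{
	u32 val;

	/* Sleep about 4us between reads; give up after 2 seconds. */
	return readl_poll_timeout(regs + STATUS_REG, val,
				  !(val & STATUS_BUSY), 4, 2000000);
}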
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 41a1c03..0a4ffd7 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1062,7 +1062,7 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
}
err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
- if (err) {
+ if (err < 0) {
dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n",
err);
return err;
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 75382f5..10b227d 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -152,7 +152,7 @@ static struct regmap_config meson_regmap_config = {
.max_register = 0x1000,
};
-static int meson_drv_bind(struct device *dev)
+static int meson_drv_bind_master(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
struct meson_drm *priv;
@@ -233,10 +233,12 @@ static int meson_drv_bind(struct device *dev)
if (ret)
goto free_drm;
- ret = component_bind_all(drm->dev, drm);
- if (ret) {
- dev_err(drm->dev, "Couldn't bind all components\n");
- goto free_drm;
+ if (has_components) {
+ ret = component_bind_all(drm->dev, drm);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't bind all components\n");
+ goto free_drm;
+ }
}
ret = meson_plane_create(priv);
@@ -276,6 +278,11 @@ free_drm:
return ret;
}
+static int meson_drv_bind(struct device *dev)
+{
+ return meson_drv_bind_master(dev, true);
+}
+
static void meson_drv_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
@@ -357,6 +364,9 @@ static int meson_drv_probe(struct platform_device *pdev)
count += meson_probe_remote(pdev, &match, np, remote);
}
+ if (count && !match)
+ return meson_drv_bind_master(&pdev->dev, false);
+
/* If some endpoints were found, initialize the nodes */
if (count) {
dev_info(&pdev->dev, "Queued %d outputs on vpu\n", count);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index adb411a..f4b5358 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1173,7 +1173,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
if (IS_G200_SE(mdev)) {
- if (mdev->unique_rev_id >= 0x02) {
+ if (mdev->unique_rev_id >= 0x04) {
+ WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
+ WREG8(MGAREG_CRTCEXT_DATA, 0);
+ } else if (mdev->unique_rev_id >= 0x02) {
u8 hi_pri_lvl;
u32 bpp;
u32 mb;
@@ -1639,6 +1642,10 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
if (mga_vga_calculate_mode_bandwidth(mode, bpp)
> (30100 * 1024))
return MODE_BANDWIDTH;
+ } else {
+ if (mga_vga_calculate_mode_bandwidth(mode, bpp)
+ > (55000 * 1024))
+ return MODE_BANDWIDTH;
}
} else if (mdev->type == G200_WB) {
if (mode->hdisplay > 1280)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 5b8e23d..0a31cd6 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -13,6 +13,7 @@ config DRM_MSM
select QCOM_SCM
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
+ select PM_OPP
default y
help
DRM/KMS driver for MSM/snapdragon.
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
index f8f48d0..9c34d78 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
@@ -116,7 +116,7 @@ static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
return 0;
}
-static struct irq_domain_ops mdss_hw_irqdomain_ops = {
+static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
.map = mdss_hw_irqdomain_map,
.xlate = irq_domain_xlate_onecell,
};
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index a38c5fe..7d37412 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -225,9 +225,10 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
sizeof(*mdp5_state), GFP_KERNEL);
+ if (!mdp5_state)
+ return NULL;
- if (mdp5_state && mdp5_state->base.fb)
- drm_framebuffer_reference(mdp5_state->base.fb);
+ __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);
return &mdp5_state->base;
}
@@ -444,6 +445,10 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
mdp5_pipe_release(state->state, old_hwpipe);
mdp5_pipe_release(state->state, old_right_hwpipe);
}
+ } else {
+ mdp5_pipe_release(state->state, mdp5_state->hwpipe);
+ mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+ mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
}
return 0;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 87b5695..9d498eb 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -830,6 +830,7 @@ static struct drm_driver msm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
+ .gem_prime_res_obj = msm_gem_prime_res_obj,
.gem_prime_pin = msm_gem_prime_pin,
.gem_prime_unpin = msm_gem_prime_unpin,
.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 28b6f9b..1b26ca6 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -224,6 +224,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *msm_gem_prime_vmap(struct drm_gem_object *obj);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index 3f299c5..a2f89ba 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -99,8 +99,8 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
}
struct msm_fence {
- struct msm_fence_context *fctx;
struct dma_fence base;
+ struct msm_fence_context *fctx;
};
static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
@@ -130,19 +130,13 @@ static bool msm_fence_signaled(struct dma_fence *fence)
return fence_completed(f->fctx, f->base.seqno);
}
-static void msm_fence_release(struct dma_fence *fence)
-{
- struct msm_fence *f = to_msm_fence(fence);
- kfree_rcu(f, base.rcu);
-}
-
static const struct dma_fence_ops msm_fence_ops = {
.get_driver_name = msm_fence_get_driver_name,
.get_timeline_name = msm_fence_get_timeline_name,
.enable_signaling = msm_fence_enable_signaling,
.signaled = msm_fence_signaled,
.wait = dma_fence_default_wait,
- .release = msm_fence_release,
+ .release = dma_fence_free,
};
struct dma_fence *
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 68e509b..50289a2 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -758,6 +758,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
struct msm_gem_object *msm_obj;
bool use_vram = false;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
switch (flags & MSM_BO_CACHE_MASK) {
case MSM_BO_UNCACHED:
case MSM_BO_CACHED:
@@ -853,7 +855,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
size = PAGE_ALIGN(dmabuf->size);
+ /* Take mutex so we can modify the inactive list in msm_gem_new_impl */
+ mutex_lock(&dev->struct_mutex);
ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
+ mutex_unlock(&dev->struct_mutex);
+
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 60bb290..13403c6 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
if (!obj->import_attach)
msm_gem_put_pages(obj);
}
+
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ return msm_obj->resv;
+}
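
Sharing the object's own reservation_object through .gem_prime_res_obj means
fences attached by an importer land where the rest of the driver looks for
them. A generic sketch of the container_of pattern the hook relies on; the
foo_* names are hypothetical:

#include <linux/kernel.h>
#include <linux/reservation.h>
#include <drm/drm_gem.h>

struct foo_gem_object {
	struct drm_gem_object base;
	struct reservation_object *resv;	/* shared fence container */
};

#define to_foo_bo(x) container_of(x, struct foo_gem_object, base)

static struct reservation_object *
foo_gem_prime_res_obj(struct drm_gem_object *obj)
{
	return to_foo_bo(obj)->resv;
}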
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 1c545eb..7832e64 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -410,12 +410,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (!in_fence)
return -EINVAL;
- /* TODO if we get an array-fence due to userspace merging multiple
- * fences, we need a way to determine if all the backing fences
- * are from our own context..
+ /*
+ * Wait if the fence is from a foreign context, or if the fence
+ * array contains any fence from a foreign context.
*/
-
- if (in_fence->context != gpu->fctx->context) {
+ if (!dma_fence_match_context(in_fence, gpu->fctx->context)) {
ret = dma_fence_wait(in_fence, true);
if (ret)
return ret;
@@ -496,8 +495,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
}
- if ((submit_cmd.size + submit_cmd.submit_offset) >=
- msm_obj->base.size) {
+ if (!submit_cmd.size ||
+ ((submit_cmd.size + submit_cmd.submit_offset) >
+ msm_obj->base.size)) {
DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
ret = -EINVAL;
goto out;
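
The first hunk above swaps a bare context comparison for
dma_fence_match_context(), which also descends into a merged dma_fence_array
and reports a match only when every backing fence belongs to the given
context. A sketch of the call pattern; `ctx` stands for the driver's own
fence context id:

#include <linux/dma-fence.h>

static long wait_if_foreign(struct dma_fence *in_fence, u64 ctx)
{
	/* Wait only when some backing fence comes from another context;
	 * fences from our own ring are ordered by the ring itself. */
	if (!dma_fence_match_context(in_fence, ctx))
		return dma_fence_wait(in_fence, true);
	return 0;
}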
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 97b9c38..0fdc88d 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -549,9 +549,9 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
gpu->grp_clks[i] = get_clock(dev, name);
/* Remember the key clocks that we need to control later */
- if (!strcmp(name, "core"))
+ if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
gpu->core_clk = gpu->grp_clks[i];
- else if (!strcmp(name, "rbbmtimer"))
+ else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
gpu->rbbmtimer_clk = gpu->grp_clks[i];
++i;
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index 1144e0c..0abe776 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -35,6 +35,13 @@
#include "mxsfb_drv.h"
#include "mxsfb_regs.h"
+#define MXS_SET_ADDR 0x4
+#define MXS_CLR_ADDR 0x8
+#define MODULE_CLKGATE BIT(30)
+#define MODULE_SFTRST BIT(31)
+/* 1 second delay should be plenty of time for block reset */
+#define RESET_TIMEOUT 1000000
+
static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val)
{
return (val & mxsfb->devdata->hs_wdth_mask) <<
@@ -159,6 +166,36 @@ static void mxsfb_disable_controller(struct mxsfb_drm_private *mxsfb)
clk_disable_unprepare(mxsfb->clk_disp_axi);
}
+/*
+ * Clear the bit and poll until it reads back as cleared. This is
+ * usually called with a reset address and a mask of either SFTRST
+ * (bit 31) or CLKGATE (bit 30).
+ */
+static int clear_poll_bit(void __iomem *addr, u32 mask)
+{
+ u32 reg;
+
+ writel(mask, addr + MXS_CLR_ADDR);
+ return readl_poll_timeout(addr, reg, !(reg & mask), 0, RESET_TIMEOUT);
+}
+
+static int mxsfb_reset_block(void __iomem *reset_addr)
+{
+ int ret;
+
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (ret)
+ return ret;
+
+ writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
+
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (ret)
+ return ret;
+
+ return clear_poll_bit(reset_addr, MODULE_CLKGATE);
+}
+
static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
{
struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode;
@@ -173,6 +210,11 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
*/
mxsfb_enable_axi_clk(mxsfb);
+ /* Mandatory eLCDIF reset as per the Reference Manual */
+ err = mxsfb_reset_block(mxsfb->base);
+ if (err)
+ return;
+
/* Clear the FIFOs */
writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET);
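
MXS-style register banks alias each register at +0x4 (SET) and +0x8 (CLR), so
individual bits can be flipped without a read-modify-write cycle; the reset
sequence added above builds on exactly that. A sketch of the aliasing
helpers, assuming the same +0x4/+0x8 layout:

#include <linux/io.h>
#include <linux/types.h>

#define MXS_SET_ADDR	0x4
#define MXS_CLR_ADDR	0x8

static void mxs_set_bits(void __iomem *base, u32 reg, u32 bits)
{
	writel(bits, base + reg + MXS_SET_ADDR);	/* set only `bits` */
}

static void mxs_clr_bits(void __iomem *base, u32 reg, u32 bits)
{
	writel(bits, base + reg + MXS_CLR_ADDR);	/* clear only `bits` */
}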
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index 6a567fe..820a480 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -4,6 +4,7 @@
struct nvkm_alarm {
struct list_head head;
+ struct list_head exec;
u64 timestamp;
void (*func)(struct nvkm_alarm *);
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 21b10f9..549763f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -360,6 +360,8 @@ nouveau_display_hpd_work(struct work_struct *work)
pm_runtime_get_sync(drm->dev->dev);
drm_helper_hpd_irq_event(drm->dev);
+ /* enable polling for external displays */
+ drm_kms_helper_poll_enable(drm->dev);
pm_runtime_mark_last_busy(drm->dev->dev);
pm_runtime_put_sync(drm->dev->dev);
@@ -413,10 +415,6 @@ nouveau_display_init(struct drm_device *dev)
if (ret)
return ret;
- /* enable polling for external displays */
- if (!dev->mode_config.poll_enabled)
- drm_kms_helper_poll_enable(dev);
-
/* enable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 2b6ac24..15a13d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -80,7 +80,7 @@ int nouveau_modeset = -1;
module_param_named(modeset, nouveau_modeset, int, 0400);
MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
-int nouveau_runtime_pm = -1;
+static int nouveau_runtime_pm = -1;
module_param_named(runpm, nouveau_runtime_pm, int, 0400);
static struct drm_driver driver_stub;
@@ -495,13 +495,16 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_fbcon_init(dev);
nouveau_led_init(dev);
- if (nouveau_runtime_pm != 0) {
+ if (nouveau_pmops_runtime()) {
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
pm_runtime_allow(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put(dev->dev);
+ } else {
+ /* enable polling for external displays */
+ drm_kms_helper_poll_enable(dev);
}
return 0;
@@ -524,7 +527,7 @@ nouveau_drm_unload(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (nouveau_runtime_pm != 0) {
+ if (nouveau_pmops_runtime()) {
pm_runtime_get_sync(dev->dev);
pm_runtime_forbid(dev->dev);
}
@@ -723,6 +726,14 @@ nouveau_pmops_thaw(struct device *dev)
return nouveau_do_resume(drm_dev, false);
}
+bool
+nouveau_pmops_runtime()
+{
+ if (nouveau_runtime_pm == -1)
+ return nouveau_is_optimus() || nouveau_is_v1_dsm();
+ return nouveau_runtime_pm == 1;
+}
+
static int
nouveau_pmops_runtime_suspend(struct device *dev)
{
@@ -730,14 +741,7 @@ nouveau_pmops_runtime_suspend(struct device *dev)
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- if (nouveau_runtime_pm == 0) {
- pm_runtime_forbid(dev);
- return -EBUSY;
- }
-
- /* are we optimus enabled? */
- if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
- DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+ if (!nouveau_pmops_runtime()) {
pm_runtime_forbid(dev);
return -EBUSY;
}
@@ -762,8 +766,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
int ret;
- if (nouveau_runtime_pm == 0)
- return -EINVAL;
+ if (!nouveau_pmops_runtime()) {
+ pm_runtime_forbid(dev);
+ return -EBUSY;
+ }
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
@@ -774,9 +780,6 @@ nouveau_pmops_runtime_resume(struct device *dev)
ret = nouveau_do_resume(drm_dev, true);
- if (!drm_dev->mode_config.poll_enabled)
- drm_kms_helper_poll_enable(drm_dev);
-
/* do magic */
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
@@ -796,14 +799,7 @@ nouveau_pmops_runtime_idle(struct device *dev)
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct drm_crtc *crtc;
- if (nouveau_runtime_pm == 0) {
- pm_runtime_forbid(dev);
- return -EBUSY;
- }
-
- /* are we optimus enabled? */
- if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
- DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+ if (!nouveau_pmops_runtime()) {
pm_runtime_forbid(dev);
return -EBUSY;
}
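
nouveau_pmops_runtime() folds the repeated "-1 auto-detects Optimus/DSM, 0
off, 1 on" policy into one predicate, so load, unload, the PM callbacks and
vga-switcheroo can no longer disagree. A sketch of that shape; the capability
argument stands in for nouveau_is_optimus()/nouveau_is_v1_dsm():

#include <linux/types.h>

static int runpm_param = -1;	/* module option: -1 auto, 0 off, 1 on */

static bool runtime_pm_wanted(bool platform_supports_runpm)
{
	if (runpm_param == -1)
		return platform_supports_runpm;	/* auto-detect */
	return runpm_param == 1;
}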
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index eadec2f..a11b6aa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -108,8 +108,6 @@ nouveau_cli(struct drm_file *fpriv)
#include <nvif/object.h>
#include <nvif/device.h>
-extern int nouveau_runtime_pm;
-
struct nouveau_drm {
struct nouveau_cli client;
struct drm_device *dev;
@@ -195,6 +193,7 @@ nouveau_drm(struct drm_device *dev)
int nouveau_pmops_suspend(struct device *);
int nouveau_pmops_resume(struct device *);
+bool nouveau_pmops_runtime(void);
#include <nvkm/core/tegra.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index ca5397b..2170534 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -568,9 +568,7 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
size *= nmemb;
- mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
- if (!mem)
- mem = vmalloc(size);
+ mem = kvmalloc(size, GFP_KERNEL);
if (!mem)
return ERR_PTR(-ENOMEM);
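
kvmalloc() encapsulates exactly the pattern being deleted: try kmalloc()
first, fall back to vmalloc() for large or fragmented allocations, and free
with kvfree() either way. A sketch of a copy-from-user helper in that style;
the function name is hypothetical:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>

static void *dup_user_array(const void __user *src, size_t size)
{
	void *mem = kvmalloc(size, GFP_KERNEL);	/* kmalloc, then vmalloc */

	if (!mem)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(mem, src, size)) {
		kvfree(mem);			/* handles both backends */
		return ERR_PTR(-EFAULT);
	}
	return mem;
}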
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index a4aacbc..02fe0ef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -87,7 +87,7 @@ void
nouveau_vga_init(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
- bool runtime = false;
+ bool runtime = nouveau_pmops_runtime();
/* only relevant for PCI devices */
if (!dev->pdev)
@@ -99,10 +99,6 @@ nouveau_vga_init(struct nouveau_drm *drm)
if (pci_is_thunderbolt_attached(dev->pdev))
return;
- if (nouveau_runtime_pm == 1)
- runtime = true;
- if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
- runtime = true;
vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
@@ -113,18 +109,13 @@ void
nouveau_vga_fini(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
- bool runtime = false;
+ bool runtime = nouveau_pmops_runtime();
vga_client_register(dev->pdev, NULL, NULL, NULL);
if (pci_is_thunderbolt_attached(dev->pdev))
return;
- if (nouveau_runtime_pm == 1)
- runtime = true;
- if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
- runtime = true;
-
vga_switcheroo_unregister_client(dev->pdev);
if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 0e58537..06e564a 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -831,8 +831,7 @@ nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
static int
nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
struct nv50_wndw_atom *asyw,
- struct nv50_head_atom *asyh,
- u32 pflip_flags)
+ struct nv50_head_atom *asyh)
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
@@ -848,7 +847,10 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
asyw->image.h = fb->base.height;
asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
- asyw->interval = pflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ? 0 : 1;
+ if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ asyw->interval = 0;
+ else
+ asyw->interval = 1;
if (asyw->image.kind) {
asyw->image.layout = 0;
@@ -887,7 +889,6 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
struct nv50_head_atom *harm = NULL, *asyh = NULL;
bool varm = false, asyv = false, asym = false;
int ret;
- u32 pflip_flags = 0;
NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
if (asyw->state.crtc) {
@@ -896,7 +897,6 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
return PTR_ERR(asyh);
asym = drm_atomic_crtc_needs_modeset(&asyh->state);
asyv = asyh->state.active;
- pflip_flags = asyh->state.pageflip_flags;
}
if (armw->state.crtc) {
@@ -912,12 +912,9 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
asyw->set.point = true;
- if (!varm || asym || armw->state.fb != asyw->state.fb) {
- ret = nv50_wndw_atomic_check_acquire(
- wndw, asyw, asyh, pflip_flags);
- if (ret)
- return ret;
- }
+ ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh);
+ if (ret)
+ return ret;
} else
if (varm) {
nv50_wndw_atomic_check_release(wndw, asyw, harm);
@@ -1122,9 +1119,13 @@ static void
nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
struct nv50_wndw_atom *asyw)
{
- asyh->curs.handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle;
- asyh->curs.offset = asyw->image.offset;
- asyh->set.curs = asyh->curs.visible;
+ u32 handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle;
+ u32 offset = asyw->image.offset;
+ if (asyh->curs.handle != handle || asyh->curs.offset != offset) {
+ asyh->curs.handle = handle;
+ asyh->curs.offset = offset;
+ asyh->set.curs = asyh->curs.visible;
+ }
}
static void
@@ -2106,7 +2107,8 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
asyc->set.dither = true;
}
} else {
- asyc->set.mask = ~0;
+ if (asyc)
+ asyc->set.mask = ~0;
asyh->set.mask = ~0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index 89d2e9d..acd76fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/object.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -295,7 +295,7 @@ nvkm_object_ctor(const struct nvkm_object_func *func,
INIT_LIST_HEAD(&object->head);
INIT_LIST_HEAD(&object->tree);
RB_CLEAR_NODE(&object->node);
- WARN_ON(oclass->engine && !object->engine);
+ WARN_ON(IS_ERR(object->engine));
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 3a24788..a7e55c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -148,7 +148,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
case NVKM_MEM_TARGET_NCOH: target = 3; break;
default:
WARN_ON(1);
- return;
+ goto unlock;
}
nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
@@ -160,6 +160,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
& 0x00100000),
msecs_to_jiffies(2000)) == 0)
nvkm_error(subdev, "runlist %d update timeout\n", runl);
+unlock:
mutex_unlock(&subdev->mutex);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index c639759..4a9bd4f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -638,7 +638,6 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
return ret;
}
- ram->ranks = (nvkm_rd32(device, 0x10f200) & 0x00000004) ? 2 : 1;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
index d1cf02d..1b0c793 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
@@ -116,6 +116,7 @@ ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
ret = nvkm_firmware_get(subdev->device, f, &sig);
if (ret)
goto free_data;
+
img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
if (!img->sig) {
ret = -ENOMEM;
@@ -126,8 +127,9 @@ ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
img->ucode_data = ls_ucode_img_build(bl, code, data,
&img->ucode_desc);
if (IS_ERR(img->ucode_data)) {
+ kfree(img->sig);
ret = PTR_ERR(img->ucode_data);
- goto free_data;
+ goto free_sig;
}
img->ucode_size = img->ucode_desc.image_size;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index df949fa..be691a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -146,7 +146,7 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
poll = false;
}
- if (list_empty(&therm->alarm.head) && poll)
+ if (poll)
nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm);
spin_unlock_irqrestore(&therm->lock, flags);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
index 91198d7..e2fecce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
@@ -83,7 +83,7 @@ nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
spin_unlock_irqrestore(&fan->lock, flags);
/* schedule next fan update, if not at target speed already */
- if (list_empty(&fan->alarm.head) && target != duty) {
+ if (target != duty) {
u16 bump_period = fan->bios.bump_period;
u16 slow_down_period = fan->bios.slow_down_period;
u64 delay;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
index 59701b7..ff9fbe7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
@@ -53,7 +53,7 @@ nvkm_fantog_update(struct nvkm_fantog *fan, int percent)
duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff);
nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
- if (list_empty(&fan->alarm.head) && percent != (duty * 100)) {
+ if (percent != (duty * 100)) {
u64 next_change = (percent * fan->period_us) / 100;
if (!duty)
next_change = fan->period_us - next_change;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
index b9703c0..9a79e91 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
@@ -185,7 +185,7 @@ alarm_timer_callback(struct nvkm_alarm *alarm)
spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
/* schedule the next poll in one second */
- if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head))
+ if (therm->func->temp_get(therm) >= 0)
nvkm_timer_alarm(tmr, 1000000000ULL, alarm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index 07dc82b..2437f7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -36,25 +36,32 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
unsigned long flags;
LIST_HEAD(exec);
- /* move any due alarms off the pending list */
+ /* Process pending alarms. */
spin_lock_irqsave(&tmr->lock, flags);
list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
- if (alarm->timestamp <= nvkm_timer_read(tmr))
- list_move_tail(&alarm->head, &exec);
+ /* Have we hit the earliest alarm that hasn't gone off? */
+ if (alarm->timestamp > nvkm_timer_read(tmr)) {
+ /* Schedule it. If we didn't race, we're done. */
+ tmr->func->alarm_init(tmr, alarm->timestamp);
+ if (alarm->timestamp > nvkm_timer_read(tmr))
+ break;
+ }
+
+ /* Move to completed list. We'll drop the lock before
+ * executing the callback so it can reschedule itself.
+ */
+ list_del_init(&alarm->head);
+ list_add(&alarm->exec, &exec);
}
- /* reschedule interrupt for next alarm time */
- if (!list_empty(&tmr->alarms)) {
- alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head);
- tmr->func->alarm_init(tmr, alarm->timestamp);
- } else {
+ /* Shut down interrupt if no more pending alarms. */
+ if (list_empty(&tmr->alarms))
tmr->func->alarm_fini(tmr);
- }
spin_unlock_irqrestore(&tmr->lock, flags);
- /* execute any pending alarm handlers */
- list_for_each_entry_safe(alarm, atemp, &exec, head) {
- list_del_init(&alarm->head);
+ /* Execute completed callbacks. */
+ list_for_each_entry_safe(alarm, atemp, &exec, exec) {
+ list_del(&alarm->exec);
alarm->func(alarm);
}
}
@@ -65,24 +72,37 @@ nvkm_timer_alarm(struct nvkm_timer *tmr, u32 nsec, struct nvkm_alarm *alarm)
struct nvkm_alarm *list;
unsigned long flags;
- alarm->timestamp = nvkm_timer_read(tmr) + nsec;
-
- /* append new alarm to list, in soonest-alarm-first order */
+ /* Remove alarm from pending list.
+ *
+ * This both protects against corruption of the list and
+ * implements alarm rescheduling/cancellation.
+ */
spin_lock_irqsave(&tmr->lock, flags);
- if (!nsec) {
- if (!list_empty(&alarm->head))
- list_del(&alarm->head);
- } else {
+ list_del_init(&alarm->head);
+
+ if (nsec) {
+ /* Insert into pending list, ordered earliest to latest. */
+ alarm->timestamp = nvkm_timer_read(tmr) + nsec;
list_for_each_entry(list, &tmr->alarms, head) {
if (list->timestamp > alarm->timestamp)
break;
}
+
list_add_tail(&alarm->head, &list->head);
+
+ /* Update HW if this is now the earliest alarm. */
+ list = list_first_entry(&tmr->alarms, typeof(*list), head);
+ if (list == alarm) {
+ tmr->func->alarm_init(tmr, alarm->timestamp);
+ /* This shouldn't happen if callers aren't stupid.
+ *
+ * Worst case scenario is that it'll take roughly
+ * 4 seconds for the next alarm to trigger.
+ */
+ WARN_ON(alarm->timestamp <= nvkm_timer_read(tmr));
+ }
}
spin_unlock_irqrestore(&tmr->lock, flags);
-
- /* process pending alarms */
- nvkm_timer_alarm_trigger(tmr);
}
void
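
The second list_head added to nvkm_alarm is what makes this rework safe:
while its callback runs, an alarm sits only on the private exec list, so the
callback can re-arm the alarm through `head` without racing the trigger path.
The general shape, as a sketch assuming a spinlock-protected pending list:

#include <linux/list.h>
#include <linux/spinlock.h>

struct alarm {
	struct list_head head;	/* on the pending list, under the lock */
	struct list_head exec;	/* on a local list while executing */
	void (*func)(struct alarm *);
};

static void run_due_alarms(struct list_head *pending, spinlock_t *lock)
{
	struct alarm *a, *tmp;
	unsigned long flags;
	LIST_HEAD(exec);

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(a, tmp, pending, head) {
		list_del_init(&a->head);	/* callback may re-add */
		list_add(&a->exec, &exec);
	}
	spin_unlock_irqrestore(lock, flags);

	/* Lock dropped: callbacks may freely reschedule themselves. */
	list_for_each_entry_safe(a, tmp, &exec, exec) {
		list_del(&a->exec);
		a->func(a);
	}
}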
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
index 7b9ce87..7f48249 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
@@ -76,8 +76,8 @@ nv04_timer_intr(struct nvkm_timer *tmr)
u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0);
if (stat & 0x00000001) {
- nvkm_timer_alarm_trigger(tmr);
nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001);
+ nvkm_timer_alarm_trigger(tmr);
stat &= ~0x00000001;
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 058340a..4a340ef 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -575,8 +575,6 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
if (ret)
return;
- cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
-
if (fb != old_state->fb) {
obj = to_qxl_framebuffer(fb)->obj;
user_bo = gem_to_qxl_bo(obj);
@@ -614,6 +612,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
qxl_bo_kunmap(cursor_bo);
qxl_bo_kunmap(user_bo);
+ cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
cmd->u.set.visible = 1;
cmd->u.set.shape = qxl_bo_physical_address(qdev,
cursor_bo, 0);
@@ -624,6 +623,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
if (ret)
goto out_free_release;
+ cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_MOVE;
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 7ba4508..ea36dc4 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
+ /* disable mclk switching if the refresh is >120Hz, even if the
+ * blanking period would allow it
+ */
+ if (r600_dpm_get_vrefresh(rdev) > 120)
+ return true;
+
if (vblank_time < switch_limit)
return true;
else
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 53710dd..ca44233 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7401,7 +7401,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
@@ -7431,7 +7431,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_RX_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
@@ -9150,23 +9150,10 @@ static u32 dce8_latency_watermark(struct dce8_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@@ -9274,14 +9261,17 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
{
struct drm_display_mode *mode = &radeon_crtc->base.mode;
struct dce8_wm_params wm_low, wm_high;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask;
if (radeon_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
/* watermark for high clocks */
if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
@@ -9297,7 +9287,7 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -9337,7 +9327,7 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
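
Two things change in the watermark math above: the wide products such as
crtc_htotal * 1000000 are divided with div_u64(), which also works on 32-bit
kernels where a raw 64-bit division would not link, and the division now
happens last, so the result no longer inherits the truncation of a pre-rounded
per-pixel period. A sketch of the helper shape; the name is hypothetical and
the clock units are whatever the surrounding code uses:

#include <linux/math64.h>
#include <linux/kernel.h>
#include <linux/types.h>

static u32 scanout_time(u32 pixels, u32 clock)
{
	/* Widen before multiplying: pixels * 1000000 can overflow u32. */
	u64 t = div_u64((u64)pixels * 1000000, clock);

	return min_t(u64, t, 65535);	/* the register field is 16 bits */
}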
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index d1b1e0c..5346372 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2188,13 +2188,7 @@ static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));
+ lb_fill_bw = min(dfixed_trunc(a), wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@@ -2261,7 +2255,7 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
struct drm_display_mode *mode = &radeon_crtc->base.mode;
struct evergreen_wm_params wm_low, wm_high;
u32 dram_channels;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 priority_a_mark = 0, priority_b_mark = 0;
@@ -2272,8 +2266,11 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
fixed20_12 a, b, c;
if (radeon_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
dram_channels = evergreen_get_number_of_dram_channels(rdev);
@@ -2291,7 +2288,7 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -2318,7 +2315,7 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -4933,7 +4930,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
@@ -4964,7 +4961,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_RX_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
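Both DC_HPD6 hunks here (and the identical ones in r600.c and si.c further down) fix the same copy-paste slip: the ack path read DC_HPD5_INT_CONTROL but wrote the result back to DC_HPD6_INT_CONTROL, clobbering HPD6's control bits with HPD5's. One defensive pattern against this class of bug is a table-driven loop; a sketch, assuming a hypothetical hpd_pending() predicate:

	/* Sketch only: hpd_pending() is illustrative, not a real helper. */
	static const u32 hpd_int_control[] = {
		DC_HPD1_INT_CONTROL, DC_HPD2_INT_CONTROL, DC_HPD3_INT_CONTROL,
		DC_HPD4_INT_CONTROL, DC_HPD5_INT_CONTROL, DC_HPD6_INT_CONTROL,
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(hpd_int_control); i++) {
		if (hpd_pending(rdev, i)) {
			u32 tmp = RREG32(hpd_int_control[i]);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(hpd_int_control[i], tmp); /* same register both ways */
		}
	}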
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 3eb0c4f..45e1d4e 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -203,6 +203,7 @@ static void r420_clock_resume(struct radeon_device *rdev)
static void r420_cp_errata_init(struct radeon_device *rdev)
{
+ int r;
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
/* RV410 and R420 can lock up if CP DMA to host memory happens
@@ -212,7 +213,8 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
* of the CP init, apparently.
*/
radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
- radeon_ring_lock(rdev, ring, 8);
+ r = radeon_ring_lock(rdev, ring, 8);
+ WARN_ON(r);
radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
radeon_ring_write(ring, rdev->config.r300.resync_scratch);
radeon_ring_write(ring, 0xDEADBEEF);
@@ -221,12 +223,14 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
static void r420_cp_errata_fini(struct radeon_device *rdev)
{
+ int r;
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
/* Catch the RESYNC we dispatched all the way back,
* at the very beginning of the CP init.
*/
- radeon_ring_lock(rdev, ring, 8);
+ r = radeon_ring_lock(rdev, ring, 8);
+ WARN_ON(r);
radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_RB3D_DC_FINISH);
radeon_ring_unlock_commit(rdev, ring, false);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 0a08517..e06e2d8 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3988,7 +3988,7 @@ static void r600_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 432480f..3178ba0 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev)
rdev->pdev->subsystem_vendor == 0x103c &&
rdev->pdev->subsystem_device == 0x280a)
return;
+ /* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume
+ * - it hangs on resume inside the dynclk 1 table.
+ */
+ if (rdev->family == CHIP_RS400 &&
+ rdev->pdev->subsystem_vendor == 0x1179 &&
+ rdev->pdev->subsystem_device == 0xff31)
+ return;
/* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index df6b58c..3ac671f 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -117,11 +117,13 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
+ !!r->write_domain;
- /* the first reloc of an UVD job is the msg and that must be in
- VRAM, also but everything into VRAM on AGP cards and older
- IGP chips to avoid image corruptions */
+ /* The first reloc of an UVD job is the msg and that must be in
+ * VRAM, the second reloc is the DPB and for WMV that must be in
+ * VRAM as well. Also put everything into VRAM on AGP cards and older
+ * IGP chips to avoid image corruptions
+ */
if (p->ring == R600_RING_TYPE_UVD_INDEX &&
- (i == 0 || pci_find_capability(p->rdev->ddev->pdev,
+ (i <= 0 || pci_find_capability(p->rdev->ddev->pdev,
PCI_CAP_ID_AGP) ||
p->rdev->family == CHIP_RS780 ||
p->rdev->family == CHIP_RS880)) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 6ecf427..0a6444d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
* https://bugzilla.kernel.org/show_bug.cgi?id=51381
*/
{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
+ /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+ * https://bugs.freedesktop.org/show_bug.cgi?id=101491
+ */
+ { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
/* macbook pro 8.2 */
{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
{ 0, 0, 0, 0, 0 },
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index c4777c8..0b3ec35 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -27,6 +27,9 @@
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
#include "radeon.h"
/*
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index e3e7cb1..4761f27 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -116,7 +116,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
if ((radeon_runtime_pm != 0) &&
radeon_has_atpx() &&
((flags & RADEON_IS_IGP) == 0) &&
- !pci_is_thunderbolt_attached(rdev->pdev))
+ !pci_is_thunderbolt_attached(dev->pdev))
flags |= RADEON_IS_PX;
/* radeon_device_init should report only fatal error
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index bec2ec0..8b72229 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -81,7 +81,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
- WARN_ON(!list_empty(&bo->va));
+ WARN_ON_ONCE(!list_empty(&bo->va));
drm_gem_object_release(&bo->gem_base);
kfree(bo);
}
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 4fdc7bd..f5e9abf 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -298,7 +298,12 @@ static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
return r;
}
- radeon_fence_emit(rdev, fence, ring->idx);
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ DRM_ERROR("Failed to emit fence\n");
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
radeon_ring_unlock_commit(rdev, ring, false);
}
return 0;
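radeon_fence_emit() can fail (it allocates the fence object before writing the fence packet), and when it does, the dwords reserved by radeon_ring_lock() have to be rolled back with the undo variant instead of committed, or the write pointer and the reservation drift apart. The reserve/commit-or-undo pattern this fix follows, sketched:

	/* Sketch only: emit_payload() stands in for the real packet writes. */
	r = radeon_ring_lock(rdev, ring, ndw);		/* reserve ndw dwords */
	if (r)
		return r;

	r = emit_payload(rdev, ring);			/* may fail mid-stream */
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);	/* drop the reservation */
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring, false);	/* publish and kick */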
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 528e5a4..5303f25 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2204,23 +2204,10 @@ static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@@ -2287,7 +2274,7 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
struct drm_display_mode *mode = &radeon_crtc->base.mode;
struct dce6_wm_params wm_low, wm_high;
u32 dram_channels;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 priority_a_mark = 0, priority_b_mark = 0;
@@ -2297,8 +2284,11 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
fixed20_12 a, b, c;
if (radeon_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min(line_time, (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
@@ -2320,7 +2310,7 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -2347,7 +2337,7 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -6330,7 +6320,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
@@ -6361,7 +6351,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_RX_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index d8fa7a9..ce5f2d1 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -245,8 +245,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
- struct rockchip_dp_device *dp = to_dp(encoder);
- int ret;
/*
* The hardware IC designed that VOP must output the RGB10 video
@@ -258,16 +256,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
s->output_type = DRM_MODE_CONNECTOR_eDP;
- if (dp->data->chip_type == RK3399_EDP) {
- /*
- * For RK3399, VOP Lit must code the out mode to RGB888,
- * VOP Big must code the out mode to RGB10.
- */
- ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node,
- encoder);
- if (ret > 0)
- s->output_mode = ROCKCHIP_OUT_MODE_P888;
- }
return 0;
}
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index a2169dd..14fa1f8 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -615,7 +615,6 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
{
struct cdn_dp_device *dp = encoder_to_dp(encoder);
int ret, val;
- struct rockchip_crtc_state *state;
ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
if (ret < 0) {
@@ -625,14 +624,10 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
(ret) ? "LIT" : "BIG");
- state = to_rockchip_crtc_state(encoder->crtc->state);
- if (ret) {
+ if (ret)
val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
- state->output_mode = ROCKCHIP_OUT_MODE_P888;
- } else {
+ else
val = DP_SEL_VOP_LIT << 16;
- state->output_mode = ROCKCHIP_OUT_MODE_AAAA;
- }
ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
if (ret)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 3f7a82d..45589d6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -875,6 +875,7 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
static void vop_crtc_enable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
+ const struct vop_data *vop_data = vop->data;
struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
@@ -967,6 +968,13 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
s->output_type);
}
+
+ /*
+ * If the VOP does not support RGB10 output, force RGB10 down to RGB888.
+ */
+ if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
+ !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
VOP_CTRL_SET(vop, out_mode, s->output_mode);
VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
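With the downgrade handled in vop_crtc_enable(), the per-encoder special cases deleted above (analogix_dp and cdn-dp) are no longer needed: the CRTC knows which VOP instance it drives, so declaring the capability once per vop_data suffices. A hedged sketch of what a future SoC entry would need (my_soc_vop is hypothetical, not from this patch):

	static const struct vop_data my_soc_vop = {
		.init_table = my_soc_init_reg_table,
		.table_size = ARRAY_SIZE(my_soc_init_reg_table),
		.feature    = VOP_FEATURE_OUTPUT_RGB10,	/* omit on RGB888-only VOPs */
		.intr       = &my_soc_vop_intr,
		.ctrl       = &my_soc_ctrl_data,
		.win        = my_soc_vop_win_data,
		.win_size   = ARRAY_SIZE(my_soc_vop_win_data),
	};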
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 5a4faa85..9979fd0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -142,6 +142,9 @@ struct vop_data {
const struct vop_intr *intr;
const struct vop_win_data *win;
unsigned int win_size;
+
+#define VOP_FEATURE_OUTPUT_RGB10 BIT(0)
+ u64 feature;
};
/* interrupt define */
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 0da4444..bafd698 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -275,6 +275,7 @@ static const struct vop_intr rk3288_vop_intr = {
static const struct vop_data rk3288_vop = {
.init_table = rk3288_init_reg_table,
.table_size = ARRAY_SIZE(rk3288_init_reg_table),
+ .feature = VOP_FEATURE_OUTPUT_RGB10,
.intr = &rk3288_vop_intr,
.ctrl = &rk3288_ctrl_data,
.win = rk3288_vop_win_data,
@@ -343,6 +344,7 @@ static const struct vop_reg_data rk3399_init_reg_table[] = {
static const struct vop_data rk3399_vop_big = {
.init_table = rk3399_init_reg_table,
.table_size = ARRAY_SIZE(rk3399_init_reg_table),
+ .feature = VOP_FEATURE_OUTPUT_RGB10,
.intr = &rk3399_vop_intr,
.ctrl = &rk3399_ctrl_data,
/*
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index f62041f..11d4e88 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -89,7 +89,7 @@ static int sti_compositor_bind(struct device *dev,
/* Nothing to do, wait for the second round */
break;
default:
- DRM_ERROR("Unknow subdev compoment type\n");
+ DRM_ERROR("Unknown subdev component type\n");
return 1;
}
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index ce2dcba..243905b 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -771,6 +771,8 @@ static void sti_hdmi_disable(struct drm_bridge *bridge)
clk_disable_unprepare(hdmi->clk_pix);
hdmi->enabled = false;
+
+ cec_notifier_set_phys_addr(hdmi->notifier, CEC_PHYS_ADDR_INVALID);
}
/**
@@ -973,6 +975,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
DRM_DEBUG_KMS("%s : %dx%d cm\n",
(hdmi->hdmi_monitor ? "hdmi monitor" : "dvi monitor"),
edid->width_cm, edid->height_cm);
+ cec_notifier_set_phys_addr_from_edid(hdmi->notifier, edid);
count = drm_add_edid_modes(connector, edid);
drm_mode_connector_update_edid_property(connector, edid);
@@ -1035,6 +1038,7 @@ sti_hdmi_connector_detect(struct drm_connector *connector, bool force)
}
DRM_DEBUG_DRIVER("hdmi cable disconnected\n");
+ cec_notifier_set_phys_addr(hdmi->notifier, CEC_PHYS_ADDR_INVALID);
return connector_status_disconnected;
}
@@ -1423,6 +1427,10 @@ static int sti_hdmi_probe(struct platform_device *pdev)
goto release_adapter;
}
+ hdmi->notifier = cec_notifier_get(&pdev->dev);
+ if (!hdmi->notifier)
+ goto release_adapter;
+
hdmi->reset = devm_reset_control_get(dev, "hdmi");
/* Take hdmi out of reset */
if (!IS_ERR(hdmi->reset))
@@ -1442,11 +1450,14 @@ static int sti_hdmi_remove(struct platform_device *pdev)
{
struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
+ cec_notifier_set_phys_addr(hdmi->notifier, CEC_PHYS_ADDR_INVALID);
+
i2c_put_adapter(hdmi->ddc_adapt);
if (hdmi->audio_pdev)
platform_device_unregister(hdmi->audio_pdev);
component_del(&pdev->dev, &sti_hdmi_ops);
+ cec_notifier_put(hdmi->notifier);
return 0;
}
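Taken together, the sti-hdmi hunks wire up the CEC notifier in the order the CEC framework expects: acquire it at probe, publish a physical address whenever a valid EDID is read, invalidate it on disable or disconnect, and drop the reference at remove. The lifecycle, condensed from the hunks above (error handling trimmed):

	/* Sketch only: condensed usage, not standalone code. */
	/* probe */
	hdmi->notifier = cec_notifier_get(&pdev->dev);
	if (!hdmi->notifier)
		goto release_adapter;

	/* a sink's EDID was read successfully */
	cec_notifier_set_phys_addr_from_edid(hdmi->notifier, edid);

	/* cable pulled, or the output disabled */
	cec_notifier_set_phys_addr(hdmi->notifier, CEC_PHYS_ADDR_INVALID);

	/* remove */
	cec_notifier_put(hdmi->notifier);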
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index 4070123..c6469b5 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <drm/drmP.h>
+#include <media/cec-notifier.h>
#define HDMI_STA 0x0010
#define HDMI_STA_DLL_LCK BIT(5)
@@ -64,6 +65,7 @@ static const struct drm_prop_enum_list colorspace_mode_names[] = {
* @audio_pdev: ASoC hdmi-codec platform device
* @audio: hdmi audio parameters.
* @drm_connector: hdmi connector
+ * @notifier: hotplug detect notifier
*/
struct sti_hdmi {
struct device dev;
@@ -89,6 +91,7 @@ struct sti_hdmi {
struct platform_device *audio_pdev;
struct hdmi_audio_params audio;
struct drm_connector *drm_connector;
+ struct cec_notifier *notifier;
};
u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index bbf5a4b..2db29d6 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -7,6 +7,7 @@ config DRM_TEGRA
select DRM_MIPI_DSI
select DRM_PANEL
select TEGRA_HOST1X
+ select IOMMU_IOVA if IOMMU_SUPPORT
help
Choose this option if you have an NVIDIA Tegra SoC.
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index 2c66a8d..6af3a9a 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -13,6 +13,8 @@ tegra-drm-y := \
sor.o \
dpaux.o \
gr2d.o \
- gr3d.o
+ gr3d.o \
+ falcon.o \
+ vic.o
obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index dba4e09..81f86a6 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1,13 +1,15 @@
/*
* Copyright (C) 2012 Avionic Design GmbH
- * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved.
+ * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/bitops.h>
#include <linux/host1x.h>
+#include <linux/idr.h>
#include <linux/iommu.h>
#include <drm/drm_atomic.h>
@@ -23,8 +25,11 @@
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
+#define CARVEOUT_SZ SZ_64M
+
struct tegra_drm_file {
- struct list_head contexts;
+ struct idr contexts;
+ struct mutex lock;
};
static void tegra_atomic_schedule(struct tegra_drm *tegra,
@@ -126,8 +131,9 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
return -ENOMEM;
if (iommu_present(&platform_bus_type)) {
+ u64 carveout_start, carveout_end, gem_start, gem_end;
struct iommu_domain_geometry *geometry;
- u64 start, end;
+ unsigned long order;
tegra->domain = iommu_domain_alloc(&platform_bus_type);
if (!tegra->domain) {
@@ -136,12 +142,26 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
}
geometry = &tegra->domain->geometry;
- start = geometry->aperture_start;
- end = geometry->aperture_end;
-
- DRM_DEBUG_DRIVER("IOMMU aperture initialized (%#llx-%#llx)\n",
- start, end);
- drm_mm_init(&tegra->mm, start, end - start + 1);
+ gem_start = geometry->aperture_start;
+ gem_end = geometry->aperture_end - CARVEOUT_SZ;
+ carveout_start = gem_end + 1;
+ carveout_end = geometry->aperture_end;
+
+ order = __ffs(tegra->domain->pgsize_bitmap);
+ init_iova_domain(&tegra->carveout.domain, 1UL << order,
+ carveout_start >> order,
+ carveout_end >> order);
+
+ tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
+ tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
+
+ drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
+ mutex_init(&tegra->mm_lock);
+
+ DRM_DEBUG("IOMMU apertures:\n");
+ DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end);
+ DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start,
+ carveout_end);
}
mutex_init(&tegra->clients_lock);
@@ -161,6 +181,8 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
drm->mode_config.max_width = 4096;
drm->mode_config.max_height = 4096;
+ drm->mode_config.allow_fb_modifiers = true;
+
drm->mode_config.funcs = &tegra_drm_mode_funcs;
err = tegra_drm_fb_prepare(drm);
@@ -208,6 +230,8 @@ config:
if (tegra->domain) {
iommu_domain_free(tegra->domain);
drm_mm_takedown(&tegra->mm);
+ mutex_destroy(&tegra->mm_lock);
+ put_iova_domain(&tegra->carveout.domain);
}
free:
kfree(tegra);
@@ -232,6 +256,8 @@ static void tegra_drm_unload(struct drm_device *drm)
if (tegra->domain) {
iommu_domain_free(tegra->domain);
drm_mm_takedown(&tegra->mm);
+ mutex_destroy(&tegra->mm_lock);
+ put_iova_domain(&tegra->carveout.domain);
}
kfree(tegra);
@@ -245,7 +271,8 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
if (!fpriv)
return -ENOMEM;
- INIT_LIST_HEAD(&fpriv->contexts);
+ idr_init(&fpriv->contexts);
+ mutex_init(&fpriv->lock);
filp->driver_priv = fpriv;
return 0;
@@ -424,23 +451,6 @@ fail:
#ifdef CONFIG_DRM_TEGRA_STAGING
-static struct tegra_drm_context *tegra_drm_get_context(__u64 context)
-{
- return (struct tegra_drm_context *)(uintptr_t)context;
-}
-
-static bool tegra_drm_file_owns_context(struct tegra_drm_file *file,
- struct tegra_drm_context *context)
-{
- struct tegra_drm_context *ctx;
-
- list_for_each_entry(ctx, &file->contexts, list)
- if (ctx == context)
- return true;
-
- return false;
-}
-
static int tegra_gem_create(struct drm_device *drm, void *data,
struct drm_file *file)
{
@@ -519,6 +529,28 @@ static int tegra_syncpt_wait(struct drm_device *drm, void *data,
&args->value);
}
+static int tegra_client_open(struct tegra_drm_file *fpriv,
+ struct tegra_drm_client *client,
+ struct tegra_drm_context *context)
+{
+ int err;
+
+ err = client->ops->open_channel(client, context);
+ if (err < 0)
+ return err;
+
+ err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
+ if (err < 0) {
+ client->ops->close_channel(context);
+ return err;
+ }
+
+ context->client = client;
+ context->id = err;
+
+ return 0;
+}
+
static int tegra_open_channel(struct drm_device *drm, void *data,
struct drm_file *file)
{
@@ -533,19 +565,22 @@ static int tegra_open_channel(struct drm_device *drm, void *data,
if (!context)
return -ENOMEM;
+ mutex_lock(&fpriv->lock);
+
list_for_each_entry(client, &tegra->clients, list)
if (client->base.class == args->client) {
- err = client->ops->open_channel(client, context);
- if (err)
+ err = tegra_client_open(fpriv, client, context);
+ if (err < 0)
break;
- list_add(&context->list, &fpriv->contexts);
- args->context = (uintptr_t)context;
- context->client = client;
- return 0;
+ args->context = context->id;
+ break;
}
- kfree(context);
+ if (err < 0)
+ kfree(context);
+
+ mutex_unlock(&fpriv->lock);
return err;
}
@@ -555,16 +590,22 @@ static int tegra_close_channel(struct drm_device *drm, void *data,
struct tegra_drm_file *fpriv = file->driver_priv;
struct drm_tegra_close_channel *args = data;
struct tegra_drm_context *context;
+ int err = 0;
- context = tegra_drm_get_context(args->context);
+ mutex_lock(&fpriv->lock);
- if (!tegra_drm_file_owns_context(fpriv, context))
- return -EINVAL;
+ context = idr_find(&fpriv->contexts, args->context);
+ if (!context) {
+ err = -EINVAL;
+ goto unlock;
+ }
- list_del(&context->list);
+ idr_remove(&fpriv->contexts, context->id);
tegra_drm_context_free(context);
- return 0;
+unlock:
+ mutex_unlock(&fpriv->lock);
+ return err;
}
static int tegra_get_syncpt(struct drm_device *drm, void *data,
@@ -574,19 +615,27 @@ static int tegra_get_syncpt(struct drm_device *drm, void *data,
struct drm_tegra_get_syncpt *args = data;
struct tegra_drm_context *context;
struct host1x_syncpt *syncpt;
+ int err = 0;
- context = tegra_drm_get_context(args->context);
+ mutex_lock(&fpriv->lock);
- if (!tegra_drm_file_owns_context(fpriv, context))
- return -ENODEV;
+ context = idr_find(&fpriv->contexts, args->context);
+ if (!context) {
+ err = -ENODEV;
+ goto unlock;
+ }
- if (args->index >= context->client->base.num_syncpts)
- return -EINVAL;
+ if (args->index >= context->client->base.num_syncpts) {
+ err = -EINVAL;
+ goto unlock;
+ }
syncpt = context->client->base.syncpts[args->index];
args->id = host1x_syncpt_id(syncpt);
- return 0;
+unlock:
+ mutex_unlock(&fpriv->lock);
+ return err;
}
static int tegra_submit(struct drm_device *drm, void *data,
@@ -595,13 +644,21 @@ static int tegra_submit(struct drm_device *drm, void *data,
struct tegra_drm_file *fpriv = file->driver_priv;
struct drm_tegra_submit *args = data;
struct tegra_drm_context *context;
+ int err;
- context = tegra_drm_get_context(args->context);
+ mutex_lock(&fpriv->lock);
- if (!tegra_drm_file_owns_context(fpriv, context))
- return -ENODEV;
+ context = idr_find(&fpriv->contexts, args->context);
+ if (!context) {
+ err = -ENODEV;
+ goto unlock;
+ }
+
+ err = context->client->ops->submit(context, args, drm, file);
- return context->client->ops->submit(context, args, drm, file);
+unlock:
+ mutex_unlock(&fpriv->lock);
+ return err;
}
static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
@@ -612,24 +669,34 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
struct tegra_drm_context *context;
struct host1x_syncpt_base *base;
struct host1x_syncpt *syncpt;
+ int err = 0;
- context = tegra_drm_get_context(args->context);
+ mutex_lock(&fpriv->lock);
- if (!tegra_drm_file_owns_context(fpriv, context))
- return -ENODEV;
+ context = idr_find(&fpriv->contexts, args->context);
+ if (!context) {
+ err = -ENODEV;
+ goto unlock;
+ }
- if (args->syncpt >= context->client->base.num_syncpts)
- return -EINVAL;
+ if (args->syncpt >= context->client->base.num_syncpts) {
+ err = -EINVAL;
+ goto unlock;
+ }
syncpt = context->client->base.syncpts[args->syncpt];
base = host1x_syncpt_get_base(syncpt);
- if (!base)
- return -ENXIO;
+ if (!base) {
+ err = -ENXIO;
+ goto unlock;
+ }
args->id = host1x_syncpt_base_id(base);
- return 0;
+unlock:
+ mutex_unlock(&fpriv->lock);
+ return err;
}
static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
@@ -804,14 +871,25 @@ static const struct file_operations tegra_drm_fops = {
.llseek = noop_llseek,
};
+static int tegra_drm_context_cleanup(int id, void *p, void *data)
+{
+ struct tegra_drm_context *context = p;
+
+ tegra_drm_context_free(context);
+
+ return 0;
+}
+
static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
{
struct tegra_drm_file *fpriv = file->driver_priv;
- struct tegra_drm_context *context, *tmp;
- list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
- tegra_drm_context_free(context);
+ mutex_lock(&fpriv->lock);
+ idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
+ mutex_unlock(&fpriv->lock);
+ idr_destroy(&fpriv->contexts);
+ mutex_destroy(&fpriv->lock);
kfree(fpriv);
}
@@ -844,7 +922,9 @@ static int tegra_debugfs_iova(struct seq_file *s, void *data)
struct tegra_drm *tegra = drm->dev_private;
struct drm_printer p = drm_seq_file_printer(s);
+ mutex_lock(&tegra->mm_lock);
drm_mm_print(&tegra->mm, &p);
+ mutex_unlock(&tegra->mm_lock);
return 0;
}
@@ -919,6 +999,84 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
return 0;
}
+void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size,
+ dma_addr_t *dma)
+{
+ struct iova *alloc;
+ void *virt;
+ gfp_t gfp;
+ int err;
+
+ if (tegra->domain)
+ size = iova_align(&tegra->carveout.domain, size);
+ else
+ size = PAGE_ALIGN(size);
+
+ gfp = GFP_KERNEL | __GFP_ZERO;
+ if (!tegra->domain) {
+ /*
+ * Many units only support 32-bit addresses, even on 64-bit
+ * SoCs. If there is no IOMMU to translate into a 32-bit IO
+ * virtual address space, force allocations to be in the
+ * lower 32-bit range.
+ */
+ gfp |= GFP_DMA;
+ }
+
+ virt = (void *)__get_free_pages(gfp, get_order(size));
+ if (!virt)
+ return ERR_PTR(-ENOMEM);
+
+ if (!tegra->domain) {
+ /*
+ * If IOMMU is disabled, devices address physical memory
+ * directly.
+ */
+ *dma = virt_to_phys(virt);
+ return virt;
+ }
+
+ alloc = alloc_iova(&tegra->carveout.domain,
+ size >> tegra->carveout.shift,
+ tegra->carveout.limit, true);
+ if (!alloc) {
+ err = -EBUSY;
+ goto free_pages;
+ }
+
+ *dma = iova_dma_addr(&tegra->carveout.domain, alloc);
+ err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
+ size, IOMMU_READ | IOMMU_WRITE);
+ if (err < 0)
+ goto free_iova;
+
+ return virt;
+
+free_iova:
+ __free_iova(&tegra->carveout.domain, alloc);
+free_pages:
+ free_pages((unsigned long)virt, get_order(size));
+
+ return ERR_PTR(err);
+}
+
+void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
+ dma_addr_t dma)
+{
+ if (tegra->domain)
+ size = iova_align(&tegra->carveout.domain, size);
+ else
+ size = PAGE_ALIGN(size);
+
+ if (tegra->domain) {
+ iommu_unmap(tegra->domain, dma, size);
+ free_iova(&tegra->carveout.domain,
+ iova_pfn(&tegra->carveout.domain, dma));
+ }
+
+ free_pages((unsigned long)virt, get_order(size));
+}
+
static int host1x_drm_probe(struct host1x_device *dev)
{
struct drm_driver *driver = &tegra_drm_driver;
@@ -1003,11 +1161,13 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra124-sor", },
{ .compatible = "nvidia,tegra124-hdmi", },
{ .compatible = "nvidia,tegra124-dsi", },
+ { .compatible = "nvidia,tegra124-vic", },
{ .compatible = "nvidia,tegra132-dsi", },
{ .compatible = "nvidia,tegra210-dc", },
{ .compatible = "nvidia,tegra210-dsi", },
{ .compatible = "nvidia,tegra210-sor", },
{ .compatible = "nvidia,tegra210-sor1", },
+ { .compatible = "nvidia,tegra210-vic", },
{ /* sentinel */ }
};
@@ -1029,6 +1189,7 @@ static struct platform_driver * const drivers[] = {
&tegra_sor_driver,
&tegra_gr2d_driver,
&tegra_gr3d_driver,
+ &tegra_vic_driver,
};
static int __init host1x_drm_init(void)
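The context rework above closes a staging-API wart: args->context used to carry a raw kernel pointer out to userspace, validated on the way back only by walking a list, so a forged value could still be dereferenced if it happened to collide. With the IDR, userspace only ever sees a small integer, and every IOCTL resolves it under fpriv->lock. The lookup pattern now shared by open/close/submit/get_syncpt, sketched:

	/* Sketch only: do_ioctl_work() stands in for each IOCTL's body. */
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;	/* id never issued, or already closed */
		goto unlock;
	}

	err = do_ioctl_work(context, args);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;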
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 5747acc..85aa2e3 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -12,6 +12,7 @@
#include <uapi/drm/tegra_drm.h>
#include <linux/host1x.h>
+#include <linux/iova.h>
#include <linux/of_gpio.h>
#include <drm/drmP.h>
@@ -42,8 +43,15 @@ struct tegra_drm {
struct drm_device *drm;
struct iommu_domain *domain;
+ struct mutex mm_lock;
struct drm_mm mm;
+ struct {
+ struct iova_domain domain;
+ unsigned long shift;
+ unsigned long limit;
+ } carveout;
+
struct mutex clients_lock;
struct list_head clients;
@@ -67,7 +75,7 @@ struct tegra_drm_client;
struct tegra_drm_context {
struct tegra_drm_client *client;
struct host1x_channel *channel;
- struct list_head list;
+ unsigned int id;
};
struct tegra_drm_client_ops {
@@ -105,6 +113,10 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
int tegra_drm_exit(struct tegra_drm *tegra);
+void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *iova);
+void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
+ dma_addr_t iova);
+
struct tegra_dc_soc_info;
struct tegra_output;
@@ -283,5 +295,6 @@ extern struct platform_driver tegra_dpaux_driver;
extern struct platform_driver tegra_sor_driver;
extern struct platform_driver tegra_gr2d_driver;
extern struct platform_driver tegra_gr3d_driver;
+extern struct platform_driver tegra_vic_driver;
#endif /* HOST1X_DRM_H */
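The new carveout member splits the IOMMU aperture: the bulk stays with drm_mm for GEM buffers, while the top CARVEOUT_SZ (64 MiB) is handed to an iova_domain for kernel-internal allocations such as Falcon firmware. Assuming (for illustration only) 4 KiB IOMMU pages and a full 32-bit aperture, the layout would come out as:

	/*
	 * aperture:  0x00000000 - 0xffffffff
	 * GEM:       0x00000000 - 0xfbffffff	drm_mm (tegra->mm)
	 * carveout:  0xfc000000 - 0xffffffff	iova_domain, granule 1 << 12
	 *
	 * order = __ffs(pgsize_bitmap) = 12, so iova units are 4 KiB pages;
	 * tegra_drm_alloc() maps pages into the carveout via iommu_map(),
	 * or falls back to GFP_DMA pages when no IOMMU domain exists.
	 */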
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
new file mode 100644
index 0000000..f685e729
--- /dev/null
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2015, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/pci_ids.h>
+#include <linux/iopoll.h>
+
+#include "falcon.h"
+#include "drm.h"
+
+enum falcon_memory {
+ FALCON_MEMORY_IMEM,
+ FALCON_MEMORY_DATA,
+};
+
+static void falcon_writel(struct falcon *falcon, u32 value, u32 offset)
+{
+ writel(value, falcon->regs + offset);
+}
+
+int falcon_wait_idle(struct falcon *falcon)
+{
+ u32 value;
+
+ return readl_poll_timeout(falcon->regs + FALCON_IDLESTATE, value,
+ (value == 0), 10, 100000);
+}
+
+static int falcon_dma_wait_idle(struct falcon *falcon)
+{
+ u32 value;
+
+ return readl_poll_timeout(falcon->regs + FALCON_DMATRFCMD, value,
+ (value & FALCON_DMATRFCMD_IDLE), 10, 100000);
+}
+
+static int falcon_copy_chunk(struct falcon *falcon,
+ phys_addr_t base,
+ unsigned long offset,
+ enum falcon_memory target)
+{
+ u32 cmd = FALCON_DMATRFCMD_SIZE_256B;
+
+ if (target == FALCON_MEMORY_IMEM)
+ cmd |= FALCON_DMATRFCMD_IMEM;
+
+ falcon_writel(falcon, offset, FALCON_DMATRFMOFFS);
+ falcon_writel(falcon, base, FALCON_DMATRFFBOFFS);
+ falcon_writel(falcon, cmd, FALCON_DMATRFCMD);
+
+ return falcon_dma_wait_idle(falcon);
+}
+
+static void falcon_copy_firmware_image(struct falcon *falcon,
+ const struct firmware *firmware)
+{
+ u32 *firmware_vaddr = falcon->firmware.vaddr;
+ dma_addr_t daddr;
+ size_t i;
+ int err;
+
+ /* copy the whole thing taking into account endianness */
+ for (i = 0; i < firmware->size / sizeof(u32); i++)
+ firmware_vaddr[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
+
+ /* ensure that caches are flushed and falcon can see the firmware */
+ daddr = dma_map_single(falcon->dev, firmware_vaddr,
+ falcon->firmware.size, DMA_TO_DEVICE);
+ err = dma_mapping_error(falcon->dev, daddr);
+ if (err) {
+ dev_err(falcon->dev, "failed to map firmware: %d\n", err);
+ return;
+ }
+ dma_sync_single_for_device(falcon->dev, daddr,
+ falcon->firmware.size, DMA_TO_DEVICE);
+ dma_unmap_single(falcon->dev, daddr, falcon->firmware.size,
+ DMA_TO_DEVICE);
+}
+
+static int falcon_parse_firmware_image(struct falcon *falcon)
+{
+ struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.vaddr;
+ struct falcon_fw_os_header_v1 *os;
+
+ /* endian problems would show up right here */
+ if (bin->magic != PCI_VENDOR_ID_NVIDIA) {
+ dev_err(falcon->dev, "incorrect firmware magic\n");
+ return -EINVAL;
+ }
+
+ /* currently only version 1 is supported */
+ if (bin->version != 1) {
+ dev_err(falcon->dev, "unsupported firmware version\n");
+ return -EINVAL;
+ }
+
+ /* check that the firmware size is consistent */
+ if (bin->size > falcon->firmware.size) {
+ dev_err(falcon->dev, "firmware image size inconsistency\n");
+ return -EINVAL;
+ }
+
+ os = falcon->firmware.vaddr + bin->os_header_offset;
+
+ falcon->firmware.bin_data.size = bin->os_size;
+ falcon->firmware.bin_data.offset = bin->os_data_offset;
+ falcon->firmware.code.offset = os->code_offset;
+ falcon->firmware.code.size = os->code_size;
+ falcon->firmware.data.offset = os->data_offset;
+ falcon->firmware.data.size = os->data_size;
+
+ return 0;
+}
+
+int falcon_read_firmware(struct falcon *falcon, const char *name)
+{
+ int err;
+
+ /* request_firmware prints error if it fails */
+ err = request_firmware(&falcon->firmware.firmware, name, falcon->dev);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+int falcon_load_firmware(struct falcon *falcon)
+{
+ const struct firmware *firmware = falcon->firmware.firmware;
+ int err;
+
+ falcon->firmware.size = firmware->size;
+
+ /* allocate iova space for the firmware */
+ falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size,
+ &falcon->firmware.paddr);
+ if (!falcon->firmware.vaddr) {
+ dev_err(falcon->dev, "dma memory mapping failed\n");
+ return -ENOMEM;
+ }
+
+ /* copy firmware image into local area. this also ensures endianness */
+ falcon_copy_firmware_image(falcon, firmware);
+
+ /* parse the image data */
+ err = falcon_parse_firmware_image(falcon);
+ if (err < 0) {
+ dev_err(falcon->dev, "failed to parse firmware image\n");
+ goto err_setup_firmware_image;
+ }
+
+ release_firmware(firmware);
+ falcon->firmware.firmware = NULL;
+
+ return 0;
+
+err_setup_firmware_image:
+ falcon->ops->free(falcon, falcon->firmware.size,
+ falcon->firmware.paddr, falcon->firmware.vaddr);
+
+ return err;
+}
+
+int falcon_init(struct falcon *falcon)
+{
+ /* check mandatory ops */
+ if (!falcon->ops || !falcon->ops->alloc || !falcon->ops->free)
+ return -EINVAL;
+
+ falcon->firmware.vaddr = NULL;
+
+ return 0;
+}
+
+void falcon_exit(struct falcon *falcon)
+{
+ if (falcon->firmware.firmware) {
+ release_firmware(falcon->firmware.firmware);
+ falcon->firmware.firmware = NULL;
+ }
+
+ if (falcon->firmware.vaddr) {
+ falcon->ops->free(falcon, falcon->firmware.size,
+ falcon->firmware.paddr,
+ falcon->firmware.vaddr);
+ falcon->firmware.vaddr = NULL;
+ }
+}
+
+int falcon_boot(struct falcon *falcon)
+{
+ unsigned long offset;
+ int err;
+
+ if (!falcon->firmware.vaddr)
+ return -EINVAL;
+
+ falcon_writel(falcon, 0, FALCON_DMACTL);
+
+ /* setup the address of the binary data so Falcon can access it later */
+ falcon_writel(falcon, (falcon->firmware.paddr +
+ falcon->firmware.bin_data.offset) >> 8,
+ FALCON_DMATRFBASE);
+
+ /* copy the data segment into Falcon internal memory */
+ for (offset = 0; offset < falcon->firmware.data.size; offset += 256)
+ falcon_copy_chunk(falcon,
+ falcon->firmware.data.offset + offset,
+ offset, FALCON_MEMORY_DATA);
+
+ /* copy the first code segment into Falcon internal memory */
+ falcon_copy_chunk(falcon, falcon->firmware.code.offset,
+ 0, FALCON_MEMORY_IMEM);
+
+ /* setup falcon interrupts */
+ falcon_writel(falcon, FALCON_IRQMSET_EXT(0xff) |
+ FALCON_IRQMSET_SWGEN1 |
+ FALCON_IRQMSET_SWGEN0 |
+ FALCON_IRQMSET_EXTERR |
+ FALCON_IRQMSET_HALT |
+ FALCON_IRQMSET_WDTMR,
+ FALCON_IRQMSET);
+ falcon_writel(falcon, FALCON_IRQDEST_EXT(0xff) |
+ FALCON_IRQDEST_SWGEN1 |
+ FALCON_IRQDEST_SWGEN0 |
+ FALCON_IRQDEST_EXTERR |
+ FALCON_IRQDEST_HALT,
+ FALCON_IRQDEST);
+
+ /* enable interface */
+ falcon_writel(falcon, FALCON_ITFEN_MTHDEN |
+ FALCON_ITFEN_CTXEN,
+ FALCON_ITFEN);
+
+ /* boot falcon */
+ falcon_writel(falcon, 0x00000000, FALCON_BOOTVEC);
+ falcon_writel(falcon, FALCON_CPUCTL_STARTCPU, FALCON_CPUCTL);
+
+ err = falcon_wait_idle(falcon);
+ if (err < 0) {
+ dev_err(falcon->dev, "Falcon boot failed due to timeout\n");
+ return err;
+ }
+
+ return 0;
+}
+
+void falcon_execute_method(struct falcon *falcon, u32 method, u32 data)
+{
+ falcon_writel(falcon, method >> 2, FALCON_UCLASS_METHOD_OFFSET);
+ falcon_writel(falcon, data, FALCON_UCLASS_METHOD_DATA);
+}
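falcon.c is a generic loader for the Falcon microcontrollers embedded in several Tegra engines; the VIC driver added below is its first user. The expected calling sequence, condensed from vic_probe()/vic_boot() further down (error handling trimmed):

	/* Sketch only: condensed usage, not standalone code. */
	falcon->dev  = dev;
	falcon->regs = regs;			/* engine's MMIO base */
	falcon->ops  = &my_falcon_ops;		/* must supply .alloc/.free */

	err = falcon_init(falcon);		/* validates the ops */
	err = falcon_read_firmware(falcon, "nvidia/tegra210/vic04_ucode.bin");
	err = falcon_load_firmware(falcon);	/* copy, flush, parse the image */
	err = falcon_boot(falcon);		/* DMA segments in, start the CPU */

	falcon_execute_method(falcon, method, data);
	falcon_exit(falcon);			/* releases firmware memory */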
diff --git a/drivers/gpu/drm/tegra/falcon.h b/drivers/gpu/drm/tegra/falcon.h
new file mode 100644
index 0000000..4504ed5
--- /dev/null
+++ b/drivers/gpu/drm/tegra/falcon.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2015, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _FALCON_H_
+#define _FALCON_H_
+
+#include <linux/types.h>
+
+#define FALCON_UCLASS_METHOD_OFFSET 0x00000040
+
+#define FALCON_UCLASS_METHOD_DATA 0x00000044
+
+#define FALCON_IRQMSET 0x00001010
+#define FALCON_IRQMSET_WDTMR (1 << 1)
+#define FALCON_IRQMSET_HALT (1 << 4)
+#define FALCON_IRQMSET_EXTERR (1 << 5)
+#define FALCON_IRQMSET_SWGEN0 (1 << 6)
+#define FALCON_IRQMSET_SWGEN1 (1 << 7)
+#define FALCON_IRQMSET_EXT(v) (((v) & 0xff) << 8)
+
+#define FALCON_IRQDEST 0x0000101c
+#define FALCON_IRQDEST_HALT (1 << 4)
+#define FALCON_IRQDEST_EXTERR (1 << 5)
+#define FALCON_IRQDEST_SWGEN0 (1 << 6)
+#define FALCON_IRQDEST_SWGEN1 (1 << 7)
+#define FALCON_IRQDEST_EXT(v) (((v) & 0xff) << 8)
+
+#define FALCON_ITFEN 0x00001048
+#define FALCON_ITFEN_CTXEN (1 << 0)
+#define FALCON_ITFEN_MTHDEN (1 << 1)
+
+#define FALCON_IDLESTATE 0x0000104c
+
+#define FALCON_CPUCTL 0x00001100
+#define FALCON_CPUCTL_STARTCPU (1 << 1)
+
+#define FALCON_BOOTVEC 0x00001104
+
+#define FALCON_DMACTL 0x0000110c
+#define FALCON_DMACTL_DMEM_SCRUBBING (1 << 1)
+#define FALCON_DMACTL_IMEM_SCRUBBING (1 << 2)
+
+#define FALCON_DMATRFBASE 0x00001110
+
+#define FALCON_DMATRFMOFFS 0x00001114
+
+#define FALCON_DMATRFCMD 0x00001118
+#define FALCON_DMATRFCMD_IDLE (1 << 1)
+#define FALCON_DMATRFCMD_IMEM (1 << 4)
+#define FALCON_DMATRFCMD_SIZE_256B (6 << 8)
+
+#define FALCON_DMATRFFBOFFS 0x0000111c
+
+struct falcon_fw_bin_header_v1 {
+ u32 magic; /* 0x10de */
+ u32 version; /* version of bin format (1) */
+ u32 size; /* entire image size including this header */
+ u32 os_header_offset;
+ u32 os_data_offset;
+ u32 os_size;
+};
+
+struct falcon_fw_os_app_v1 {
+ u32 offset;
+ u32 size;
+};
+
+struct falcon_fw_os_header_v1 {
+ u32 code_offset;
+ u32 code_size;
+ u32 data_offset;
+ u32 data_size;
+};
+
+struct falcon;
+
+struct falcon_ops {
+ void *(*alloc)(struct falcon *falcon, size_t size,
+ dma_addr_t *paddr);
+ void (*free)(struct falcon *falcon, size_t size,
+ dma_addr_t paddr, void *vaddr);
+};
+
+struct falcon_firmware_section {
+ unsigned long offset;
+ size_t size;
+};
+
+struct falcon_firmware {
+ /* Firmware after it is read but not loaded */
+ const struct firmware *firmware;
+
+ /* Raw firmware data */
+ dma_addr_t paddr;
+ void *vaddr;
+ size_t size;
+
+ /* Parsed firmware information */
+ struct falcon_firmware_section bin_data;
+ struct falcon_firmware_section data;
+ struct falcon_firmware_section code;
+};
+
+struct falcon {
+ /* Set by falcon client */
+ struct device *dev;
+ void __iomem *regs;
+ const struct falcon_ops *ops;
+ void *data;
+
+ struct falcon_firmware firmware;
+};
+
+int falcon_init(struct falcon *falcon);
+void falcon_exit(struct falcon *falcon);
+int falcon_read_firmware(struct falcon *falcon, const char *firmware_name);
+int falcon_load_firmware(struct falcon *falcon);
+int falcon_boot(struct falcon *falcon);
+void falcon_execute_method(struct falcon *falcon, u32 method, u32 data);
+int falcon_wait_idle(struct falcon *falcon);
+
+#endif /* _FALCON_H_ */
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index c61d67d..25acb73 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -52,9 +52,26 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
struct tegra_bo_tiling *tiling)
{
struct tegra_fb *fb = to_tegra_fb(framebuffer);
-
- /* TODO: handle YUV formats? */
- *tiling = fb->planes[0]->tiling;
+ uint64_t modifier = fb->base.modifier;
+
+ switch (fourcc_mod_tegra_mod(modifier)) {
+ case NV_FORMAT_MOD_TEGRA_TILED:
+ tiling->mode = TEGRA_BO_TILING_MODE_TILED;
+ tiling->value = 0;
+ break;
+
+ case NV_FORMAT_MOD_TEGRA_16BX2_BLOCK(0):
+ tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
+ tiling->value = fourcc_mod_tegra_param(modifier);
+ if (tiling->value > 5)
+ return -EINVAL;
+ break;
+
+ default:
+ /* TODO: handle YUV formats? */
+ *tiling = fb->planes[0]->tiling;
+ break;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 8672f5d..424569b 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -128,12 +128,14 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
if (!bo->mm)
return -ENOMEM;
+ mutex_lock(&tegra->mm_lock);
+
err = drm_mm_insert_node_generic(&tegra->mm,
bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
if (err < 0) {
dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
err);
- goto free;
+ goto unlock;
}
bo->paddr = bo->mm->start;
@@ -147,11 +149,14 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
bo->size = err;
+ mutex_unlock(&tegra->mm_lock);
+
return 0;
remove:
drm_mm_remove_node(bo->mm);
-free:
+unlock:
+ mutex_unlock(&tegra->mm_lock);
kfree(bo->mm);
return err;
}
@@ -161,8 +166,11 @@ static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
if (!bo->mm)
return 0;
+ mutex_lock(&tegra->mm_lock);
iommu_unmap(tegra->domain, bo->paddr, bo->size);
drm_mm_remove_node(bo->mm);
+ mutex_unlock(&tegra->mm_lock);
+
kfree(bo->mm);
return 0;
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
new file mode 100644
index 0000000..cd804e4
--- /dev/null
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -0,0 +1,396 @@
+/*
+ * Copyright (c) 2015, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/host1x.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/pmc.h>
+
+#include "drm.h"
+#include "falcon.h"
+#include "vic.h"
+
+struct vic_config {
+ const char *firmware;
+};
+
+struct vic {
+ struct falcon falcon;
+ bool booted;
+
+ void __iomem *regs;
+ struct tegra_drm_client client;
+ struct host1x_channel *channel;
+ struct iommu_domain *domain;
+ struct device *dev;
+ struct clk *clk;
+
+ /* Platform configuration */
+ const struct vic_config *config;
+};
+
+static inline struct vic *to_vic(struct tegra_drm_client *client)
+{
+ return container_of(client, struct vic, client);
+}
+
+static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
+{
+ writel(value, vic->regs + offset);
+}
+
+static int vic_runtime_resume(struct device *dev)
+{
+ struct vic *vic = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(vic->clk);
+}
+
+static int vic_runtime_suspend(struct device *dev)
+{
+ struct vic *vic = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(vic->clk);
+
+ vic->booted = false;
+
+ return 0;
+}
+
+static int vic_boot(struct vic *vic)
+{
+ u32 fce_ucode_size, fce_bin_data_offset;
+ void *hdr;
+ int err = 0;
+
+ if (vic->booted)
+ return 0;
+
+ /* setup clockgating registers */
+ vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
+ CG_IDLE_CG_EN |
+ CG_WAKEUP_DLY_CNT(4),
+ NV_PVIC_MISC_PRI_VIC_CG);
+
+ err = falcon_boot(&vic->falcon);
+ if (err < 0)
+ return err;
+
+ hdr = vic->falcon.firmware.vaddr;
+ fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
+ hdr = vic->falcon.firmware.vaddr +
+ *(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
+ fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
+
+ falcon_execute_method(&vic->falcon, VIC_SET_APPLICATION_ID, 1);
+ falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
+ fce_ucode_size);
+ falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
+ (vic->falcon.firmware.paddr + fce_bin_data_offset)
+ >> 8);
+
+ err = falcon_wait_idle(&vic->falcon);
+ if (err < 0) {
+ dev_err(vic->dev,
+ "failed to set application ID and FCE base\n");
+ return err;
+ }
+
+ vic->booted = true;
+
+ return 0;
+}
+
+static void *vic_falcon_alloc(struct falcon *falcon, size_t size,
+ dma_addr_t *iova)
+{
+ struct tegra_drm *tegra = falcon->data;
+
+ return tegra_drm_alloc(tegra, size, iova);
+}
+
+static void vic_falcon_free(struct falcon *falcon, size_t size,
+ dma_addr_t iova, void *va)
+{
+ struct tegra_drm *tegra = falcon->data;
+
+ return tegra_drm_free(tegra, size, va, iova);
+}
+
+static const struct falcon_ops vic_falcon_ops = {
+ .alloc = vic_falcon_alloc,
+ .free = vic_falcon_free
+};
+
+static int vic_init(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct tegra_drm *tegra = dev->dev_private;
+ struct vic *vic = to_vic(drm);
+ int err;
+
+ if (tegra->domain) {
+ err = iommu_attach_device(tegra->domain, vic->dev);
+ if (err < 0) {
+ dev_err(vic->dev, "failed to attach to domain: %d\n",
+ err);
+ return err;
+ }
+
+ vic->domain = tegra->domain;
+ }
+
+ if (!vic->falcon.data) {
+ vic->falcon.data = tegra;
+ err = falcon_load_firmware(&vic->falcon);
+ if (err < 0)
+ goto detach_device;
+ }
+
+ vic->channel = host1x_channel_request(client->dev);
+ if (!vic->channel) {
+ err = -ENOMEM;
+ goto detach_device;
+ }
+
+ client->syncpts[0] = host1x_syncpt_request(client->dev, 0);
+ if (!client->syncpts[0]) {
+ err = -ENOMEM;
+ goto free_channel;
+ }
+
+ err = tegra_drm_register_client(tegra, drm);
+ if (err < 0)
+ goto free_syncpt;
+
+ return 0;
+
+free_syncpt:
+ host1x_syncpt_free(client->syncpts[0]);
+free_channel:
+ host1x_channel_free(vic->channel);
+detach_device:
+ if (tegra->domain)
+ iommu_detach_device(tegra->domain, vic->dev);
+
+ return err;
+}
+
+static int vic_exit(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct tegra_drm *tegra = dev->dev_private;
+ struct vic *vic = to_vic(drm);
+ int err;
+
+ err = tegra_drm_unregister_client(tegra, drm);
+ if (err < 0)
+ return err;
+
+ host1x_syncpt_free(client->syncpts[0]);
+ host1x_channel_free(vic->channel);
+
+ if (vic->domain) {
+ iommu_detach_device(vic->domain, vic->dev);
+ vic->domain = NULL;
+ }
+
+ return 0;
+}
+
+static const struct host1x_client_ops vic_client_ops = {
+ .init = vic_init,
+ .exit = vic_exit,
+};
+
+static int vic_open_channel(struct tegra_drm_client *client,
+ struct tegra_drm_context *context)
+{
+ struct vic *vic = to_vic(client);
+ int err;
+
+ err = pm_runtime_get_sync(vic->dev);
+ if (err < 0)
+ return err;
+
+ err = vic_boot(vic);
+ if (err < 0) {
+ pm_runtime_put(vic->dev);
+ return err;
+ }
+
+ context->channel = host1x_channel_get(vic->channel);
+ if (!context->channel) {
+ pm_runtime_put(vic->dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void vic_close_channel(struct tegra_drm_context *context)
+{
+ struct vic *vic = to_vic(context->client);
+
+ host1x_channel_put(context->channel);
+
+ pm_runtime_put(vic->dev);
+}
+
+static const struct tegra_drm_client_ops vic_ops = {
+ .open_channel = vic_open_channel,
+ .close_channel = vic_close_channel,
+ .submit = tegra_drm_submit,
+};
+
+static const struct vic_config vic_t124_config = {
+ .firmware = "nvidia/tegra124/vic03_ucode.bin",
+};
+
+static const struct vic_config vic_t210_config = {
+ .firmware = "nvidia/tegra210/vic04_ucode.bin",
+};
+
+static const struct of_device_id vic_match[] = {
+ { .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
+ { .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
+ { },
+};
+
+static int vic_probe(struct platform_device *pdev)
+{
+ struct vic_config *vic_config = NULL;
+ struct device *dev = &pdev->dev;
+ struct host1x_syncpt **syncpts;
+ struct resource *regs;
+ const struct of_device_id *match;
+ struct vic *vic;
+ int err;
+
+ match = of_match_device(vic_match, dev);
+ vic_config = (struct vic_config *)match->data;
+
+ vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
+ if (!vic)
+ return -ENOMEM;
+
+ syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
+ if (!syncpts)
+ return -ENOMEM;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "failed to get registers\n");
+ return -ENXIO;
+ }
+
+ vic->regs = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(vic->regs))
+ return PTR_ERR(vic->regs);
+
+ vic->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(vic->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(vic->clk);
+ }
+
+ vic->falcon.dev = dev;
+ vic->falcon.regs = vic->regs;
+ vic->falcon.ops = &vic_falcon_ops;
+
+ err = falcon_init(&vic->falcon);
+ if (err < 0)
+ return err;
+
+ err = falcon_read_firmware(&vic->falcon, vic_config->firmware);
+ if (err < 0)
+ goto exit_falcon;
+
+ platform_set_drvdata(pdev, vic);
+
+ INIT_LIST_HEAD(&vic->client.base.list);
+ vic->client.base.ops = &vic_client_ops;
+ vic->client.base.dev = dev;
+ vic->client.base.class = HOST1X_CLASS_VIC;
+ vic->client.base.syncpts = syncpts;
+ vic->client.base.num_syncpts = 1;
+ vic->dev = dev;
+ vic->config = vic_config;
+
+ INIT_LIST_HEAD(&vic->client.list);
+ vic->client.ops = &vic_ops;
+
+ err = host1x_client_register(&vic->client.base);
+ if (err < 0) {
+ dev_err(dev, "failed to register host1x client: %d\n", err);
+ platform_set_drvdata(pdev, NULL);
+ goto exit_falcon;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ err = vic_runtime_resume(&pdev->dev);
+ if (err < 0)
+ goto unregister_client;
+ }
+
+ return 0;
+
+unregister_client:
+ host1x_client_unregister(&vic->client.base);
+exit_falcon:
+ falcon_exit(&vic->falcon);
+
+ return err;
+}
+
+static int vic_remove(struct platform_device *pdev)
+{
+ struct vic *vic = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&vic->client.base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ if (pm_runtime_enabled(&pdev->dev))
+ pm_runtime_disable(&pdev->dev);
+ else
+ vic_runtime_suspend(&pdev->dev);
+
+ falcon_exit(&vic->falcon);
+
+ return 0;
+}
+
+static const struct dev_pm_ops vic_pm_ops = {
+ SET_RUNTIME_PM_OPS(vic_runtime_suspend, vic_runtime_resume, NULL)
+};
+
+struct platform_driver tegra_vic_driver = {
+ .driver = {
+ .name = "tegra-vic",
+ .of_match_table = vic_match,
+ .pm = &vic_pm_ops
+ },
+ .probe = vic_probe,
+ .remove = vic_remove,
+};
diff --git a/drivers/gpu/drm/tegra/vic.h b/drivers/gpu/drm/tegra/vic.h
new file mode 100644
index 0000000..2184481
--- /dev/null
+++ b/drivers/gpu/drm/tegra/vic.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_VIC_H
+#define TEGRA_VIC_H
+
+/* VIC methods */
+
+#define VIC_SET_APPLICATION_ID 0x00000200
+#define VIC_SET_FCE_UCODE_SIZE 0x0000071C
+#define VIC_SET_FCE_UCODE_OFFSET 0x0000072C
+
+/* VIC registers */
+
+#define NV_PVIC_MISC_PRI_VIC_CG 0x000016d0
+#define CG_IDLE_CG_DLY_CNT(val) ((val & 0x3f) << 0)
+#define CG_IDLE_CG_EN (1 << 6)
+#define CG_WAKEUP_DLY_CNT(val) ((val & 0xf) << 16)
+
+/* Firmware offsets */
+
+#define VIC_UCODE_FCE_HEADER_OFFSET (6*4)
+#define VIC_UCODE_FCE_DATA_OFFSET (7*4)
+#define FCE_UCODE_SIZE_OFFSET (2*4)
+
+#endif /* TEGRA_VIC_H */
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index e44626a..a6d7fcb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1394,7 +1394,7 @@ EXPORT_SYMBOL(ttm_bo_evict_mm);
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
unsigned long p_size)
{
- int ret = -EINVAL;
+ int ret;
struct ttm_mem_type_manager *man;
unsigned i;
@@ -1412,7 +1412,6 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
return ret;
man->bdev = bdev;
- ret = 0;
if (type != TTM_PL_SYSTEM) {
ret = (*man->func->init)(man, p_size);
if (ret)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index a37de5d..eeddc1e 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -51,6 +51,9 @@
#if IS_ENABLED(CONFIG_AGP)
#include <asm/agp.h>
#endif
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 16
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index cec4b4b..90ddbdc 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -53,6 +53,9 @@
#if IS_ENABLED(CONFIG_AGP)
#include <asm/agp.h>
#endif
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 4
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index aee3c00..5260179 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -44,6 +44,9 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
/**
* Allocates storage for pointers to the pages that back the ttm.
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index d05abc6..4a65003 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -37,7 +37,7 @@ struct udl_fbdev {
};
#define DL_ALIGN_UP(x, a) ALIGN(x, a)
-#define DL_ALIGN_DOWN(x, a) ALIGN(x-(a-1), a)
+#define DL_ALIGN_DOWN(x, a) ALIGN_DOWN(x, a)
/** Read the red component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF)
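ALIGN_DOWN(x, a) rounds x down to a multiple of a (a power of two), which is exactly what the open-coded ALIGN(x - (a - 1), a) computed; the named helper simply states the intent. For example:

DL_ALIGN_DOWN(13, 8);	/* == 8  */
DL_ALIGN_DOWN(16, 8);	/* == 16 */
DL_ALIGN_UP(13, 8);	/* == 16 */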
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 4918668..1e1c90b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -175,8 +175,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
DRM_INFO("virgl 3d acceleration not supported by guest\n");
#endif
- ret = vgdev->vdev->config->find_vqs(vgdev->vdev, 2, vqs,
- callbacks, names, NULL);
+ ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
if (ret) {
DRM_ERROR("failed to find virt queues\n");
goto err_vqs;
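virtio_find_vqs() is a convenience wrapper around the config-space operation the old code open-coded; roughly (a sketch of the inline helper this kernel series introduces):

static inline int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
				  struct virtqueue *vqs[],
				  vq_callback_t *callbacks[],
				  const char * const names[],
				  struct irq_affinity *desc)
{
	return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, desc);
}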
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 130d51c..4b948fb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -41,9 +41,9 @@
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20170221"
+#define VMWGFX_DRIVER_DATE "20170607"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 12
+#define VMWGFX_DRIVER_MINOR 13
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index b6a0806..a1c68e6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -368,6 +368,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
return fifo_state->static_buffer;
else {
fifo_state->dynamic_buffer = vmalloc(bytes);
+ if (!fifo_state->dynamic_buffer)
+ goto out_err;
return fifo_state->dynamic_buffer;
}
}
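vmalloc() can fail, and the old code handed its result straight back to the caller; the added check routes allocation failure through the function's existing error path (assumed here to unwind the reservation and return NULL, which callers already treat as failure):

	fifo_state->dynamic_buffer = vmalloc(bytes);
	if (!fifo_state->dynamic_buffer)
		goto out_err;	/* unwind the reservation, return NULL */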
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ef9f3a2..1d2db5d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -274,108 +274,6 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
}
-
-/**
- * vmw_du_cursor_plane_update() - Update cursor image and location
- *
- * @plane: plane object to update
- * @crtc: owning CRTC of @plane
- * @fb: framebuffer to flip onto plane
- * @crtc_x: x offset of plane on crtc
- * @crtc_y: y offset of plane on crtc
- * @crtc_w: width of plane rectangle on crtc
- * @crtc_h: height of plane rectangle on crtc
- * @src_x: Not used
- * @src_y: Not used
- * @src_w: Not used
- * @src_h: Not used
- *
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int vmw_du_cursor_plane_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w,
- unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
-{
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct vmw_surface *surface = NULL;
- struct vmw_dma_buffer *dmabuf = NULL;
- s32 hotspot_x, hotspot_y;
- int ret;
-
- hotspot_x = du->hotspot_x + fb->hot_x;
- hotspot_y = du->hotspot_y + fb->hot_y;
-
- /* A lot of the code assumes this */
- if (crtc_w != 64 || crtc_h != 64) {
- ret = -EINVAL;
- goto out;
- }
-
- if (vmw_framebuffer_to_vfb(fb)->dmabuf)
- dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
- else
- surface = vmw_framebuffer_to_vfbs(fb)->surface;
-
- if (surface && !surface->snooper.image) {
- DRM_ERROR("surface not suitable for cursor\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* setup new image */
- ret = 0;
- if (surface) {
- /* vmw_user_surface_lookup takes one reference */
- du->cursor_surface = surface;
-
- du->cursor_age = du->cursor_surface->snooper.age;
-
- ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
- 64, 64, hotspot_x, hotspot_y);
- } else if (dmabuf) {
- /* vmw_user_surface_lookup takes one reference */
- du->cursor_dmabuf = dmabuf;
-
- ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, crtc_w, crtc_h,
- hotspot_x, hotspot_y);
- } else {
- vmw_cursor_update_position(dev_priv, false, 0, 0);
- goto out;
- }
-
- if (!ret) {
- du->cursor_x = crtc_x + du->set_gui_x;
- du->cursor_y = crtc_y + du->set_gui_y;
-
- vmw_cursor_update_position(dev_priv, true,
- du->cursor_x + hotspot_x,
- du->cursor_y + hotspot_y);
- }
-
-out:
- return ret;
-}
-
-
-int vmw_du_cursor_plane_disable(struct drm_plane *plane)
-{
- if (plane->fb) {
- drm_framebuffer_unreference(plane->fb);
- plane->fb = NULL;
- }
-
- return -EINVAL;
-}
-
-
void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
@@ -473,18 +371,6 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
void
-vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-
- drm_atomic_set_fb_for_plane(plane->state, NULL);
- vmw_cursor_update_position(dev_priv, false, 0, 0);
-}
-
-
-void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -1498,6 +1384,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
*/
if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
dmabuf && only_2d &&
+ mode_cmd->width > 64 && /* Don't create a proxy for cursor */
dev_priv->active_display_unit == vmw_du_screen_target) {
ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
dmabuf, &surface);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 13f2f1d2..5f8d678 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -256,10 +256,6 @@ int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
uint32_t size,
struct drm_modeset_acquire_ctx *ctx);
-int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t handle, uint32_t width, uint32_t height,
- int32_t hot_x, int32_t hot_y);
-int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
int vmw_du_connector_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val);
@@ -339,15 +335,6 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
/* Universal Plane Helpers */
void vmw_du_primary_plane_destroy(struct drm_plane *plane);
void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
-int vmw_du_cursor_plane_disable(struct drm_plane *plane);
-int vmw_du_cursor_plane_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w,
- unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
/* Atomic Helpers */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
@@ -356,8 +343,6 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state);
void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state);
-void vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state);
int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state);
void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index bad31bd..50be1f0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -56,6 +56,8 @@ enum stdu_content_type {
* @right: Right side of bounding box.
* @top: Top side of bounding box.
* @bottom: Bottom side of bounding box.
+ * @fb_left: Left side of the framebuffer/content bounding box
+ * @fb_top: Top of the framebuffer/content bounding box
* @buf: DMA buffer when DMA-ing between buffer and screen targets.
* @sid: Surface ID when copying between surface and screen targets.
*/
@@ -63,6 +65,7 @@ struct vmw_stdu_dirty {
struct vmw_kms_dirty base;
SVGA3dTransferType transfer;
s32 left, right, top, bottom;
+ s32 fb_left, fb_top;
u32 pitch;
union {
struct vmw_dma_buffer *buf;
@@ -647,7 +650,7 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
*
* @dirty: The closure structure.
*
- * This function calculates the bounding box for all the incoming clips
+ * This function calculates the bounding box for all the incoming clips.
*/
static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
{
@@ -656,11 +659,19 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
dirty->num_hits = 1;
- /* Calculate bounding box */
+ /* Calculate destination bounding box */
ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
+
+ /*
+ * Calculate content bounding box. We only need the top-left
+ * coordinate because width and height will be the same as the
+ * destination bounding box above
+ */
+ ddirty->fb_left = min_t(s32, ddirty->fb_left, dirty->fb_x);
+ ddirty->fb_top = min_t(s32, ddirty->fb_top, dirty->fb_y);
}
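Each call folds one clip rectangle into a running min/max bounding box, starting from the S32_MAX/S32_MIN sentinels that out_cleanup resets further down, so the commit path can issue a single blit covering the union. A worked sketch with invented coordinates:

/*
 * clip A: (10,10)-(20,20), clip B: (15,5)-(30,25)
 *   left   = min(S32_MAX, 10, 15) = 10
 *   top    = min(S32_MAX, 10,  5) =  5
 *   right  = max(S32_MIN, 20, 30) = 30
 *   bottom = max(S32_MIN, 20, 25) = 25
 * => one blit over the union (10,5)-(30,25); fb_left/fb_top track the
 *    matching top-left corner in the framebuffer.
 */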
@@ -697,11 +708,11 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
/* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
src = ttm_kmap_obj_virtual(&stdu->host_map, &not_used);
- src += dirty->unit_y1 * src_pitch + dirty->unit_x1 * stdu->cpp;
+ src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
dst_pitch = ddirty->pitch;
dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used);
- dst += dirty->fb_y * dst_pitch + dirty->fb_x * stdu->cpp;
+ dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
/* Figure out the real direction */
@@ -760,7 +771,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
}
out_cleanup:
- ddirty->left = ddirty->top = S32_MAX;
+ ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
ddirty->right = ddirty->bottom = S32_MIN;
}
@@ -812,6 +823,7 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
SVGA3D_READ_HOST_VRAM;
ddirty.left = ddirty.top = S32_MAX;
ddirty.right = ddirty.bottom = S32_MIN;
+ ddirty.fb_left = ddirty.fb_top = S32_MAX;
ddirty.pitch = vfb->base.pitches[0];
ddirty.buf = buf;
ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
@@ -1355,6 +1367,11 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
DRM_ERROR("Failed to bind surface to STDU.\n");
else
crtc->primary->fb = plane->state->fb;
+
+ ret = vmw_stdu_update_st(dev_priv, stdu);
+
+ if (ret)
+ DRM_ERROR("Failed to update STDU.\n");
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7681341..6b70bd2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1274,11 +1274,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
uint32_t size;
- uint32_t backup_handle;
+ uint32_t backup_handle = 0;
if (req->multisample_count != 0)
return -EINVAL;
+ if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
+
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
128;
@@ -1314,12 +1317,16 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
&res->backup,
&user_srf->backup_base);
- if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
- res->backup_size) {
- DRM_ERROR("Surface backup buffer is too small.\n");
- vmw_dmabuf_unreference(&res->backup);
- ret = -EINVAL;
- goto out_unlock;
+ if (ret == 0) {
+ if (res->backup->base.num_pages * PAGE_SIZE <
+ res->backup_size) {
+ DRM_ERROR("Surface backup buffer is too small.\n");
+ vmw_dmabuf_unreference(&res->backup);
+ ret = -EINVAL;
+ goto out_unlock;
+ } else {
+ backup_handle = req->buffer_handle;
+ }
}
} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
@@ -1491,7 +1498,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
dev_priv->stdu_max_height);
if (size.width > max_width || size.height > max_height) {
- DRM_ERROR("%ux%u\n, exeeds max surface size %ux%u",
+ DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
size.width, size.height,
max_width, max_height);
return -EINVAL;
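Both changes above harden the ioctl against untrusted input: mip_levels is bounds-checked before it can influence allocation sizing, and backup_handle starts at zero so the value later copied back to userspace is defined even when no buffer lookup happens. Schematically (field names follow the surrounding code; the copy-out line is an assumption about the tail of the function):

	uint32_t backup_handle = 0;	/* never copy uninitialized stack to user */
	...
	rep->buffer_handle = backup_handle;	/* defined on every path */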