author     Dave Airlie <airlied@redhat.com>    2018-05-16 08:21:51 +1000
committer  Dave Airlie <airlied@redhat.com>    2018-05-16 08:31:29 +1000
commit     95d2c3e15da613afd53b4b8f2cdb352dc7d12221 (patch)
tree       b407c31c3ad2a7e133e61ba36edb31274492fb84
parent     b8a71080ad288eb3fe42f101e64526cdd2823f93 (diff)
parent     8344c53f57057b42a5da87e9557c40fcda18fb7a (diff)
Merge branch 'drm-next-4.18' of git://people.freedesktop.org/~agd5f/linux into drm-next
Main changes for 4.18. I'd like to do a separate pull for vega20 later this
week or next.

Highlights:
- Reserve pre-OS scanout buffer during init for seamless transition from
  console to driver
- VEGAM support
- Improved GPU scheduler documentation
- Initial gfxoff support for raven
- SR-IOV fixes
- Default to non-AGP on PowerPC for radeon
- Fine grained clock voltage control for vega10
- Power profiles for vega10
- Further clean up of powerplay/driver interface
- Underlay fixes
- Display link bw updates
- Gamma fixes
- Scatter/Gather display support on CZ/ST
- Misc bug fixes and clean ups

[airlied: fixup v3d vs scheduler API change]
Link: https://patchwork.freedesktop.org/patch/msgid/20180515185450.1113-1-alexander.deucher@amd.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 244
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 72
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 195
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 105
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 58
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 120
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 365
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 110
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 52
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 114
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/df_v1_7.c | 112
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/df_v1_7.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h) | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 102
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 267
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 137
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 67
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 63
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 77
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15d.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 130
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig | 14
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 225
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c | 158
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 86
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h | 579
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 260
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 74
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 112
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 63
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 272
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_helper.c | 59
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_audio.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | 103
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 68
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 95
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 49
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 76
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 90
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 221
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 187
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 79
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 288
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 94
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 303
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | 1490
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h | 524
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_services.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c | 138
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 64
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/transform.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/reg_helper.h | 56
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq_types.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_asic_id.h | 16
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_types.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/include/fixed31_32.h | 40
-rw-r--r--  drivers/gpu/drm/amd/display/include/logger_types.h | 64
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 382
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_gamma.h | 48
-rw-r--r--  drivers/gpu/drm/amd/display/modules/stats/stats.c | 65
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 20
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h | 19
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h | 8
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_default.h | 26
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h | 33
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h | 48
-rw-r--r--  drivers/gpu/drm/amd/include/atombios.h | 7
-rw-r--r--  drivers/gpu/drm/amd/include/atomfirmware.h | 170
-rw-r--r--  drivers/gpu/drm/amd/include/cgs_common.h | 170
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h | 13
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 489
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 22
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 90
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 204
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h | 15
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c | 99
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c | 39
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 186
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c | 16
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 378
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 206
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | 37
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 98
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h | 27
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 951
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h | 26
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | 121
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 107
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 95
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c | 7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c | 37
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 32
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu75.h | 760
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h | 886
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 24
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 25
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 39
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 40
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 19
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 52
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c | 56
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 2382
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h | 75
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 53
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 8
-rw-r--r--  drivers/gpu/drm/scheduler/gpu_scheduler.c | 76
-rw-r--r--  drivers/gpu/drm/scheduler/gpu_scheduler_trace.h (renamed from include/drm/gpu_scheduler_trace.h) | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 51
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_lock.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_module.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 11
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 1
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.c | 2
-rw-r--r--  include/drm/amd_asic_type.h | 1
-rw-r--r--  include/drm/gpu_scheduler.h | 55
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 20
259 files changed, 14419 insertions(+), 4589 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index f300202..68e9f58 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -64,6 +64,10 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce
amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o
+# add DF block
+amdgpu-y += \
+ df_v1_7.o
+
# add GMC block
amdgpu-y += \
gmc_v7_0.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c8b605f..03a2c0be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -129,6 +129,7 @@ extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
+extern uint amdgpu_smu_memory_pool_size;
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -137,6 +138,7 @@ extern int amdgpu_si_support;
extern int amdgpu_cik_support;
#endif
+#define AMDGPU_SG_THRESHOLD (256*1024*1024)
#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -222,10 +224,10 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_LAST
};
-int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_clockgating_state(void *dev,
enum amd_ip_block_type block_type,
enum amd_clockgating_state state);
-int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_powergating_state(void *dev,
enum amd_ip_block_type block_type,
enum amd_powergating_state state);
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
@@ -681,6 +683,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
@@ -771,9 +775,18 @@ struct amdgpu_rlc {
u32 starting_offsets_start;
u32 reg_list_format_size_bytes;
u32 reg_list_size_bytes;
+ u32 reg_list_format_direct_reg_list_length;
+ u32 save_restore_list_cntl_size_bytes;
+ u32 save_restore_list_gpm_size_bytes;
+ u32 save_restore_list_srm_size_bytes;
u32 *register_list_format;
u32 *register_restore;
+ u8 *save_restore_list_cntl;
+ u8 *save_restore_list_gpm;
+ u8 *save_restore_list_srm;
+
+ bool is_rlc_v2_1;
};
#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
@@ -867,6 +880,8 @@ struct amdgpu_gfx_config {
/* gfx configure feature */
uint32_t double_offchip_lds_buf;
+ /* cached value of DB_DEBUG2 */
+ uint32_t db_debug2;
};
struct amdgpu_cu_info {
@@ -938,6 +953,12 @@ struct amdgpu_gfx {
uint32_t ce_feature_version;
uint32_t pfp_feature_version;
uint32_t rlc_feature_version;
+ uint32_t rlc_srlc_fw_version;
+ uint32_t rlc_srlc_feature_version;
+ uint32_t rlc_srlg_fw_version;
+ uint32_t rlc_srlg_feature_version;
+ uint32_t rlc_srls_fw_version;
+ uint32_t rlc_srls_feature_version;
uint32_t mec_feature_version;
uint32_t mec2_feature_version;
struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
@@ -1204,6 +1225,8 @@ struct amdgpu_asic_funcs {
/* invalidate hdp read cache */
void (*invalidate_hdp)(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
+ /* check if the asic needs a full reset or if soft reset will work */
+ bool (*need_full_reset)(struct amdgpu_device *adev);
};
/*
@@ -1368,7 +1391,17 @@ struct amdgpu_nbio_funcs {
void (*detect_hw_virt)(struct amdgpu_device *adev);
};
-
+struct amdgpu_df_funcs {
+ void (*init)(struct amdgpu_device *adev);
+ void (*enable_broadcast_mode)(struct amdgpu_device *adev,
+ bool enable);
+ u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
+ u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
+ void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+ bool enable);
+ void (*get_clockgating_state)(struct amdgpu_device *adev,
+ u32 *flags);
+};
/* Define the HW IP blocks will be used in driver , add more if necessary */
enum amd_hw_ip_block_type {
GC_HWIP = 1,
@@ -1398,6 +1431,7 @@ enum amd_hw_ip_block_type {
struct amd_powerplay {
void *pp_handle;
const struct amd_pm_funcs *pp_funcs;
+ uint32_t pp_feature;
};
#define AMDGPU_RESET_MAGIC_NUM 64
@@ -1590,6 +1624,7 @@ struct amdgpu_device {
uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
const struct amdgpu_nbio_funcs *nbio_funcs;
+ const struct amdgpu_df_funcs *df_funcs;
/* delayed work_func for deferring clockgating during resume */
struct delayed_work late_init_work;
@@ -1764,6 +1799,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
+#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
@@ -1790,6 +1826,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
+#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
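Note on the new need_full_reset callback added above: together with the
amdgpu_asic_need_full_reset() wrapper it lets the generic reset code in
amdgpu_device.c ask each chip backend whether soft reset is viable (see the
amdgpu_device.c hunks further down). A minimal sketch of how a backend could
plug in; the callback body below is illustrative only, not copied from the
patch (the real implementations land in cik.c/vi.c/soc15.c/si.c per the
diffstat):

/* Illustrative sketch only. */
static bool vi_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* assumption: these parts always take the full reset path */
		return true;
	default:
		/* assumption: everything else may try soft reset first */
		return false;
	}
}

static const struct amdgpu_asic_funcs vi_asic_funcs = {
	/* ...existing callbacks such as .flush_hdp, .invalidate_hdp... */
	.need_full_reset = vi_need_full_reset,
};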
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index a29362f..03ee367 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -290,12 +290,11 @@ static int acp_hw_init(void *handle)
else if (r)
return r;
- r = cgs_get_pci_resource(adev->acp.cgs_device, CGS_RESOURCE_TYPE_MMIO,
- 0x5289, 0, &acp_base);
- if (r == -ENODEV)
- return 0;
- else if (r)
- return r;
+ if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
+ return -EINVAL;
+
+ acp_base = adev->rmmio_base;
+
if (adev->asic_type != CHIP_STONEY) {
adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
if (adev->acp.acp_genpd == NULL)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index cd0e8f1..bd36ee9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -243,13 +243,19 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_bo *bo = NULL;
+ struct amdgpu_bo_param bp;
int r;
uint64_t gpu_addr_tmp = 0;
void *cpu_ptr_tmp = NULL;
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC, ttm_bo_type_kernel,
- NULL, &bo);
+ memset(&bp, 0, sizeof(bp));
+ bp.size = size;
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+ bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = NULL;
+ r = amdgpu_bo_create(adev, &bp, &bo);
if (r) {
dev_err(adev->dev,
"failed to allocate BO for amdkfd (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 5296e24..72ab2b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1143,6 +1143,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
+ struct amdgpu_bo_param bp;
int byte_align;
u32 domain, alloc_domain;
u64 alloc_flags;
@@ -1215,8 +1216,14 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
va, size, domain_string(alloc_domain));
- ret = amdgpu_bo_create(adev, size, byte_align,
- alloc_domain, alloc_flags, ttm_bo_type_device, NULL, &bo);
+ memset(&bp, 0, sizeof(bp));
+ bp.size = size;
+ bp.byte_align = byte_align;
+ bp.domain = alloc_domain;
+ bp.flags = alloc_flags;
+ bp.type = ttm_bo_type_device;
+ bp.resv = NULL;
+ ret = amdgpu_bo_create(adev, &bp, &bo);
if (ret) {
pr_debug("Failed to create BO on domain %s. ret %d\n",
domain_string(alloc_domain), ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 1ae5ae8..1bcb2b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -550,7 +550,7 @@ static int amdgpu_atpx_init(void)
* look up whether we are the integrated or discrete GPU (all asics).
* Returns the client id.
*/
-static int amdgpu_atpx_get_client_id(struct pci_dev *pdev)
+static enum vga_switcheroo_client_id amdgpu_atpx_get_client_id(struct pci_dev *pdev)
{
if (amdgpu_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
return VGA_SWITCHEROO_IGD;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 02b849b..19cfff3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -75,13 +75,20 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
{
struct amdgpu_bo *dobj = NULL;
struct amdgpu_bo *sobj = NULL;
+ struct amdgpu_bo_param bp;
uint64_t saddr, daddr;
int r, n;
int time;
+ memset(&bp, 0, sizeof(bp));
+ bp.size = size;
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = sdomain;
+ bp.flags = 0;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = NULL;
n = AMDGPU_BENCHMARK_ITERATIONS;
- r = amdgpu_bo_create(adev, size, PAGE_SIZE,sdomain, 0,
- ttm_bo_type_kernel, NULL, &sobj);
+ r = amdgpu_bo_create(adev, &bp, &sobj);
if (r) {
goto out_cleanup;
}
@@ -93,8 +100,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
if (r) {
goto out_cleanup;
}
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, ddomain, 0,
- ttm_bo_type_kernel, NULL, &dobj);
+ bp.domain = ddomain;
+ r = amdgpu_bo_create(adev, &bp, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 71a57b2..5b3d3bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -23,7 +23,6 @@
*/
#include <linux/list.h>
#include <linux/slab.h>
-#include <linux/pci.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
@@ -109,121 +108,6 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
WARN(1, "Invalid indirect register space");
}
-static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
- enum cgs_resource_type resource_type,
- uint64_t size,
- uint64_t offset,
- uint64_t *resource_base)
-{
- CGS_FUNC_ADEV;
-
- if (resource_base == NULL)
- return -EINVAL;
-
- switch (resource_type) {
- case CGS_RESOURCE_TYPE_MMIO:
- if (adev->rmmio_size == 0)
- return -ENOENT;
- if ((offset + size) > adev->rmmio_size)
- return -EINVAL;
- *resource_base = adev->rmmio_base;
- return 0;
- case CGS_RESOURCE_TYPE_DOORBELL:
- if (adev->doorbell.size == 0)
- return -ENOENT;
- if ((offset + size) > adev->doorbell.size)
- return -EINVAL;
- *resource_base = adev->doorbell.base;
- return 0;
- case CGS_RESOURCE_TYPE_FB:
- case CGS_RESOURCE_TYPE_IO:
- case CGS_RESOURCE_TYPE_ROM:
- default:
- return -EINVAL;
- }
-}
-
-static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
- unsigned table, uint16_t *size,
- uint8_t *frev, uint8_t *crev)
-{
- CGS_FUNC_ADEV;
- uint16_t data_start;
-
- if (amdgpu_atom_parse_data_header(
- adev->mode_info.atom_context, table, size,
- frev, crev, &data_start))
- return (uint8_t*)adev->mode_info.atom_context->bios +
- data_start;
-
- return NULL;
-}
-
-static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
- uint8_t *frev, uint8_t *crev)
-{
- CGS_FUNC_ADEV;
-
- if (amdgpu_atom_parse_cmd_header(
- adev->mode_info.atom_context, table,
- frev, crev))
- return 0;
-
- return -EINVAL;
-}
-
-static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
- void *args)
-{
- CGS_FUNC_ADEV;
-
- return amdgpu_atom_execute_table(
- adev->mode_info.atom_context, table, args);
-}
-
-static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
- enum amd_ip_block_type block_type,
- enum amd_clockgating_state state)
-{
- CGS_FUNC_ADEV;
- int i, r = -1;
-
- for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.valid)
- continue;
-
- if (adev->ip_blocks[i].version->type == block_type) {
- r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
- (void *)adev,
- state);
- break;
- }
- }
- return r;
-}
-
-static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
- enum amd_ip_block_type block_type,
- enum amd_powergating_state state)
-{
- CGS_FUNC_ADEV;
- int i, r = -1;
-
- for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.valid)
- continue;
-
- if (adev->ip_blocks[i].version->type == block_type) {
- r = adev->ip_blocks[i].version->funcs->set_powergating_state(
- (void *)adev,
- state);
- break;
- }
- }
- return r;
-}
-
-
static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
{
CGS_FUNC_ADEV;
@@ -271,18 +155,6 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
return result;
}
-static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
-{
- CGS_FUNC_ADEV;
- if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- return 0;
- }
- /* cannot release other firmware because they are not created by cgs */
- return -EINVAL;
-}
-
static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
enum cgs_ucode_id type)
{
@@ -326,34 +198,6 @@ static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
return fw_version;
}
-static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
- bool en)
-{
- CGS_FUNC_ADEV;
-
- if (adev->gfx.rlc.funcs->enter_safe_mode == NULL ||
- adev->gfx.rlc.funcs->exit_safe_mode == NULL)
- return 0;
-
- if (en)
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
- else
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
-
- return 0;
-}
-
-static void amdgpu_cgs_lock_grbm_idx(struct cgs_device *cgs_device,
- bool lock)
-{
- CGS_FUNC_ADEV;
-
- if (lock)
- mutex_lock(&adev->grbm_idx_mutex);
- else
- mutex_unlock(&adev->grbm_idx_mutex);
-}
-
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info)
@@ -541,6 +385,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
case CHIP_POLARIS12:
strcpy(fw_name, "amdgpu/polaris12_smc.bin");
break;
+ case CHIP_VEGAM:
+ strcpy(fw_name, "amdgpu/vegam_smc.bin");
+ break;
case CHIP_VEGA10:
if ((adev->pdev->device == 0x687f) &&
((adev->pdev->revision == 0xc0) ||
@@ -598,97 +445,12 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
return 0;
}
-static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
-{
- CGS_FUNC_ADEV;
- return amdgpu_sriov_vf(adev);
-}
-
-static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
- struct cgs_display_info *info)
-{
- CGS_FUNC_ADEV;
- struct cgs_mode_info *mode_info;
-
- if (info == NULL)
- return -EINVAL;
-
- mode_info = info->mode_info;
- if (mode_info)
- /* if the displays are off, vblank time is max */
- mode_info->vblank_time_us = 0xffffffff;
-
- if (!amdgpu_device_has_dc_support(adev)) {
- struct amdgpu_crtc *amdgpu_crtc;
- struct drm_device *ddev = adev->ddev;
- struct drm_crtc *crtc;
- uint32_t line_time_us, vblank_lines;
-
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled) {
- info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
- info->display_count++;
- }
- if (mode_info != NULL &&
- crtc->enabled && amdgpu_crtc->enabled &&
- amdgpu_crtc->hw_mode.clock) {
- line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
- amdgpu_crtc->hw_mode.clock;
- vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
- amdgpu_crtc->hw_mode.crtc_vdisplay +
- (amdgpu_crtc->v_border * 2);
- mode_info->vblank_time_us = vblank_lines * line_time_us;
- mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- /* we have issues with mclk switching with refresh rates
- * over 120 hz on the non-DC code.
- */
- if (mode_info->refresh_rate > 120)
- mode_info->vblank_time_us = 0;
- mode_info = NULL;
- }
- }
- }
- } else {
- info->display_count = adev->pm.pm_display_cfg.num_display;
- if (mode_info != NULL) {
- mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time;
- mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh;
- }
- }
- return 0;
-}
-
-
-static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
-{
- CGS_FUNC_ADEV;
-
- adev->pm.dpm_enabled = enabled;
-
- return 0;
-}
-
static const struct cgs_ops amdgpu_cgs_ops = {
.read_register = amdgpu_cgs_read_register,
.write_register = amdgpu_cgs_write_register,
.read_ind_register = amdgpu_cgs_read_ind_register,
.write_ind_register = amdgpu_cgs_write_ind_register,
- .get_pci_resource = amdgpu_cgs_get_pci_resource,
- .atom_get_data_table = amdgpu_cgs_atom_get_data_table,
- .atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
- .atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
.get_firmware_info = amdgpu_cgs_get_firmware_info,
- .rel_firmware = amdgpu_cgs_rel_firmware,
- .set_powergating_state = amdgpu_cgs_set_powergating_state,
- .set_clockgating_state = amdgpu_cgs_set_clockgating_state,
- .get_active_displays_info = amdgpu_cgs_get_active_displays_info,
- .notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
- .is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
- .enter_safe_mode = amdgpu_cgs_enter_safe_mode,
- .lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
};
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 96501ff..8e66851 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -691,7 +691,7 @@ static int amdgpu_connector_lvds_get_modes(struct drm_connector *connector)
return ret;
}
-static int amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
@@ -843,7 +843,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
return ret;
}
-static int amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
@@ -1172,7 +1172,7 @@ static void amdgpu_connector_dvi_force(struct drm_connector *connector)
amdgpu_connector->use_digital = true;
}
-static int amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
@@ -1448,7 +1448,7 @@ out:
return ret;
}
-static int amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
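All four hunks in this file make the same fix: the mode_valid hook in struct
drm_connector_helper_funcs returns enum drm_mode_status, not int, so the
amdgpu implementations are retyped to match. A minimal sketch of a hook under
the corrected signature (the clock limit is a made-up example value;
MODE_OK/MODE_CLOCK_HIGH come from the DRM core headers):

static enum drm_mode_status
example_mode_valid(struct drm_connector *connector,
		   struct drm_display_mode *mode)
{
	/* mode->clock is the pixel clock in kHz */
	if (mode->clock > 600000)	/* hypothetical 600 MHz limit */
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}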
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 8e66f37..9c1d491 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -382,8 +382,7 @@ retry:
p->bytes_moved += ctx.bytes_moved;
if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
- bo->tbo.mem.mem_type == TTM_PL_VRAM &&
- bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ amdgpu_bo_in_cpu_visible_vram(bo))
p->bytes_moved_vis += ctx.bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@@ -411,7 +410,6 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
struct amdgpu_bo_list_entry *candidate = p->evictable;
struct amdgpu_bo *bo = candidate->robj;
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- u64 initial_bytes_moved, bytes_moved;
bool update_bytes_moved_vis;
uint32_t other;
@@ -435,18 +433,14 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
continue;
/* Good we can try to move this BO somewhere else */
- amdgpu_ttm_placement_from_domain(bo, other);
update_bytes_moved_vis =
adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
- bo->tbo.mem.mem_type == TTM_PL_VRAM &&
- bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT;
- initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
+ amdgpu_bo_in_cpu_visible_vram(bo);
+ amdgpu_ttm_placement_from_domain(bo, other);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- bytes_moved = atomic64_read(&adev->num_bytes_moved) -
- initial_bytes_moved;
- p->bytes_moved += bytes_moved;
+ p->bytes_moved += ctx.bytes_moved;
if (update_bytes_moved_vis)
- p->bytes_moved_vis += bytes_moved;
+ p->bytes_moved_vis += ctx.bytes_moved;
if (unlikely(r))
break;
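Both hunks above replace the open-coded visible-VRAM test with
amdgpu_bo_in_cpu_visible_vram(), which the diffstat shows landing in
amdgpu_object.h, and they evaluate it before amdgpu_ttm_placement_from_domain()
changes the BO's placement. Reconstructed from the removed lines, the helper
is presumably equivalent to the following (the real header version may differ
in detail):

/* Reconstruction from the removed condition; sketch only. */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	return bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	       bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT;
}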
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 09d35051..a8e531d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -91,7 +91,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
continue;
r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
- rq, amdgpu_sched_jobs, &ctx->guilty);
+ rq, &ctx->guilty);
if (r)
goto failed;
}
@@ -111,8 +111,9 @@ failed:
return r;
}
-static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
+static void amdgpu_ctx_fini(struct kref *ref)
{
+ struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
struct amdgpu_device *adev = ctx->adev;
unsigned i, j;
@@ -125,13 +126,11 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
kfree(ctx->fences);
ctx->fences = NULL;
- for (i = 0; i < adev->num_rings; i++)
- drm_sched_entity_fini(&adev->rings[i]->sched,
- &ctx->rings[i].entity);
-
amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
mutex_destroy(&ctx->lock);
+
+ kfree(ctx);
}
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
@@ -170,12 +169,15 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
static void amdgpu_ctx_do_release(struct kref *ref)
{
struct amdgpu_ctx *ctx;
+ u32 i;
ctx = container_of(ref, struct amdgpu_ctx, refcount);
- amdgpu_ctx_fini(ctx);
+ for (i = 0; i < ctx->adev->num_rings; i++)
+ drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
+ &ctx->rings[i].entity);
- kfree(ctx);
+ amdgpu_ctx_fini(ref);
}
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
@@ -419,9 +421,11 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
if (other) {
signed long r;
- r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+ r = dma_fence_wait(other, true);	/* interruptible wait */
if (r < 0) {
- DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+
return r;
}
}
@@ -435,16 +439,62 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
idr_init(&mgr->ctx_handles);
}
+void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+{
+ struct amdgpu_ctx *ctx;
+ struct idr *idp;
+ uint32_t id, i;
+
+ idp = &mgr->ctx_handles;
+
+ idr_for_each_entry(idp, ctx, id) {
+
+ if (!ctx->adev)
+ return;
+
+ for (i = 0; i < ctx->adev->num_rings; i++)
+ if (kref_read(&ctx->refcount) == 1)
+ drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
+ &ctx->rings[i].entity);
+ else
+ DRM_ERROR("ctx %p is still alive\n", ctx);
+ }
+}
+
+void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
+{
+ struct amdgpu_ctx *ctx;
+ struct idr *idp;
+ uint32_t id, i;
+
+ idp = &mgr->ctx_handles;
+
+ idr_for_each_entry(idp, ctx, id) {
+
+ if (!ctx->adev)
+ return;
+
+ for (i = 0; i < ctx->adev->num_rings; i++)
+ if (kref_read(&ctx->refcount) == 1)
+ drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
+ &ctx->rings[i].entity);
+ else
+ DRM_ERROR("ctx %p is still alive\n", ctx);
+ }
+}
+
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
struct amdgpu_ctx *ctx;
struct idr *idp;
uint32_t id;
+ amdgpu_ctx_mgr_entity_cleanup(mgr);
+
idp = &mgr->ctx_handles;
idr_for_each_entry(idp, ctx, id) {
- if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
+ if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
DRM_ERROR("ctx %p is still alive\n", ctx);
}
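Taken together, these amdgpu_ctx.c hunks split context teardown in two:
amdgpu_ctx_mgr_entity_fini() and amdgpu_ctx_mgr_entity_cleanup() tear down
the scheduler entities (gracefully or forcibly), while amdgpu_ctx_fini()
becomes the kref release function that frees the context itself. The expected
call order on device-file close is roughly the following sketch (the caller
shown is an assumption; per the diffstat the wiring lands in amdgpu_kms.c):

/* Sketch of the teardown sequence, not the literal amdgpu_kms.c code. */
amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);	/* drm_sched_entity_do_release:
						 * stop feeding the scheduler */
/* ... in-flight jobs drain here ... */
amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);		/* entity_cleanup, then
						 * kref_put -> amdgpu_ctx_fini */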
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 448d69f..f5fb937 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -28,8 +28,13 @@
#include <linux/debugfs.h>
#include "amdgpu.h"
-/*
- * Debugfs
+/**
+ * amdgpu_debugfs_add_files - Add simple debugfs entries
+ *
+ * @adev: Device to attach debugfs entries to
+ * @files: Array of function callbacks that respond to reads
+ * @nfiles: Number of callbacks to register
+ *
*/
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
const struct drm_info_list *files,
@@ -64,7 +69,33 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
#if defined(CONFIG_DEBUG_FS)
-
+/**
+ * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
+ *
+ * @read: True if reading
+ * @f: open file handle
+ * @buf: User buffer to write/read to
+ * @size: Number of bytes to write/read
+ * @pos: Offset to seek to
+ *
+ * This debugfs entry has special meaning on the offset being sought.
+ * Various bits have different meanings:
+ *
+ * Bit 62: Indicates a GRBM bank switch is needed
+ * Bit 61: Indicates a SRBM bank switch is needed (implies bit 62 is
+ * zero)
+ * Bits 24..33: The SE or ME selector if needed
+ * Bits 34..43: The SH (or SA) or PIPE selector if needed
+ * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed
+ *
+ * Bit 23: Indicates that the PM power gating lock should be held
+ * This is necessary to read registers that might be
+ * unreliable during a power gating transition.
+ *
+ * The lower bits are the BYTE offset of the register to read. This
+ * allows reading multiple registers in a single call and having
+ * the returned size reflect that.
+ */
static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
char __user *buf, size_t size, loff_t *pos)
{
@@ -164,19 +195,37 @@ end:
return result;
}
-
+/**
+ * amdgpu_debugfs_regs_read - Callback for reading MMIO registers
+ */
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
}
+/**
+ * amdgpu_debugfs_regs_write - Callback for writing MMIO registers
+ */
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
}
+
+/**
+ * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to read. This
+ * allows reading multiple registers in a single call and having
+ * the returned size reflect that.
+ */
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -204,6 +253,18 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
return result;
}
+/**
+ * amdgpu_debugfs_regs_pcie_write - Write to a PCIE register
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos: Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to write. This
+ * allows writing multiple registers in a single call and having
+ * the returned size reflect that.
+ */
static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
@@ -232,6 +293,18 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
return result;
}
+/**
+ * amdgpu_debugfs_regs_didt_read - Read from a DIDT register
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to read. This
+ * allows reading multiple registers in a single call and having
+ * the returned size reflect that.
+ */
static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -259,6 +332,18 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
return result;
}
+/**
+ * amdgpu_debugfs_regs_didt_write - Write to a DIDT register
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos: Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to write. This
+ * allows writing multiple registers in a single call and having
+ * the returned size reflect that.
+ */
static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
@@ -287,6 +372,18 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
return result;
}
+/**
+ * amdgpu_debugfs_regs_smc_read - Read from a SMC register
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to read. This
+ * allows reading multiple registers in a single call and having
+ * the returned size reflect that.
+ */
static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -314,6 +411,18 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
return result;
}
+/**
+ * amdgpu_debugfs_regs_smc_write - Write to a SMC register
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos: Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to write. This
+ * allows writing multiple registers in a single call and having
+ * the returned size reflect that.
+ */
static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
@@ -342,6 +451,20 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
return result;
}
+/**
+ * amdgpu_debugfs_gca_config_read - Read from gfx config data
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * This file is used to access configuration data in a somewhat
+ * stable fashion. The format is a series of DWORDs with the first
+ * indicating which revision it is. New content is appended to the
+ * end so that older software can still read the data.
+ */
+
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -418,6 +541,19 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
return result;
}
+/**
+ * amdgpu_debugfs_sensor_read - Read from the powerplay sensors
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The offset is treated as the BYTE address of one of the sensors
+ * enumerated in amd/include/kgd_pp_interface.h under the
+ * 'amd_pp_sensors' enumeration. For instance to read the UVD VCLK
+ * you would use the offset 3 * 4 = 12.
+ */
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -428,7 +564,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
if (size & 3 || *pos & 0x3)
return -EINVAL;
- if (amdgpu_dpm == 0)
+ if (!adev->pm.dpm_enabled)
return -EINVAL;
/* convert offset to sensor number */
@@ -457,6 +593,27 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
return !r ? outsize : r;
}
+/** amdgpu_debugfs_wave_read - Read WAVE STATUS data
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The offset being sought changes which wave that the status data
+ * will be returned for. The bits are used as follows:
+ *
+ * Bits 0..6: Byte offset into data
+ * Bits 7..14: SE selector
+ * Bits 15..22: SH/SA selector
+ * Bits 23..30: CU/{WGP+SIMD} selector
+ * Bits 31..36: WAVE ID selector
+ * Bits 37..44: SIMD ID selector
+ *
+ * The returned data begins with one DWORD of version information
+ * Followed by WAVE STATUS registers relevant to the GFX IP version
+ * being used. See gfx_v8_0_read_wave_data() for an example output.
+ */
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -507,6 +664,28 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
return result;
}
+/** amdgpu_debugfs_gpr_read - Read wave gprs
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The offset being sought changes which wave that the status data
+ * will be returned for. The bits are used as follows:
+ *
+ * Bits 0..11: Byte offset into data
+ * Bits 12..19: SE selector
+ * Bits 20..27: SH/SA selector
+ * Bits 28..35: CU/{WGP+SIMD} selector
+ * Bits 36..43: WAVE ID selector
+ * Bits 44..51: SIMD ID selector
+ * Bits 52..59: Thread selector
+ * Bits 60..61: Bank selector (VGPR=0,SGPR=1)
+ *
+ * The return data comes from the SGPR or VGPR register bank for
+ * the selected operational unit.
+ */
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -637,6 +816,12 @@ static const char *debugfs_regs_names[] = {
"amdgpu_gpr",
};
+/**
+ * amdgpu_debugfs_regs_init - Initialize debugfs entries that provide
+ * register access.
+ *
+ * @adev: The device to attach the debugfs entries to
+ */
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
struct drm_minor *minor = adev->ddev->primary;
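As a concrete illustration of the offset encoding documented in the comment
blocks added above, a userspace read of one MMIO dword through the
amdgpu_regs file could look like the sketch below. The register offset, the
selector values, and the DRI minor number ("dri/0") are all placeholders:

#define _FILE_OFFSET_BITS 64
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Lower bits: byte offset of the register; upper bits: bank
	 * selectors per the amdgpu_debugfs_process_reg_op() comment. */
	uint64_t off = 0x1234;		/* placeholder register byte offset */

	off |= 1ULL << 62;		/* request a GRBM bank switch */
	off |= 1ULL << 24;		/* SE selector (bits 24..33) */
	off |= 0ULL << 34;		/* SH/SA selector (bits 34..43) */
	off |= 2ULL << 44;		/* INSTANCE selector (bits 44..53) */

	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	uint32_t val;
	if (pread(fd, &val, sizeof(val), (off_t)off) != sizeof(val)) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("reg 0x1234 = 0x%08x\n", val);
	close(fd);
	return 0;
}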
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 34af664..9fb20a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -83,6 +83,7 @@ static const char *amdgpu_asic_name[] = {
"POLARIS10",
"POLARIS11",
"POLARIS12",
+ "VEGAM",
"VEGA10",
"VEGA12",
"RAVEN",
@@ -690,6 +691,8 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
{
u64 size_af, size_bf;
+ mc->gart_size += adev->pm.smu_prv_buffer_size;
+
size_af = adev->gmc.mc_mask - mc->vram_end;
size_bf = mc->vram_start;
if (size_bf > size_af) {
@@ -907,6 +910,46 @@ static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
}
}
+static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
+{
+ struct sysinfo si;
+ bool is_os_64 = (sizeof(void *) == 8) ? true : false;
+ uint64_t total_memory;
+ uint64_t dram_size_seven_GB = 0x1B8000000;
+ uint64_t dram_size_three_GB = 0xB8000000;
+
+ if (amdgpu_smu_memory_pool_size == 0)
+ return;
+
+ if (!is_os_64) {
+ DRM_WARN("Not 64-bit OS, feature not supported\n");
+ goto def_value;
+ }
+ si_meminfo(&si);
+ total_memory = (uint64_t)si.totalram * si.mem_unit;
+
+ if ((amdgpu_smu_memory_pool_size == 1) ||
+ (amdgpu_smu_memory_pool_size == 2)) {
+ if (total_memory < dram_size_three_GB)
+ goto def_value1;
+ } else if ((amdgpu_smu_memory_pool_size == 4) ||
+ (amdgpu_smu_memory_pool_size == 8)) {
+ if (total_memory < dram_size_seven_GB)
+ goto def_value1;
+ } else {
+ DRM_WARN("Smu memory pool size not supported\n");
+ goto def_value;
+ }
+ adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
+
+ return;
+
+def_value1:
+ DRM_WARN("No enough system memory\n");
+def_value:
+ adev->pm.smu_prv_buffer_size = 0;
+}
+
/**
* amdgpu_device_check_arguments - validate module params
*
@@ -948,6 +991,8 @@ static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
amdgpu_vm_fragment_size = -1;
}
+ amdgpu_device_check_smu_prv_buffer_size(adev);
+
amdgpu_device_check_vm_size(adev);
amdgpu_device_check_block_size(adev);
@@ -1039,10 +1084,11 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
* the hardware IP specified.
* Returns the error code from the last instance.
*/
-int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_clockgating_state(void *dev,
enum amd_ip_block_type block_type,
enum amd_clockgating_state state)
{
+ struct amdgpu_device *adev = dev;
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1072,10 +1118,11 @@ int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
* the hardware IP specified.
* Returns the error code from the last instance.
*/
-int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_powergating_state(void *dev,
enum amd_ip_block_type block_type,
enum amd_powergating_state state)
{
+ struct amdgpu_device *adev = dev;
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1320,9 +1367,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
case CHIP_TOPAZ:
case CHIP_TONGA:
case CHIP_FIJI:
- case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
case CHIP_POLARIS12:
+ case CHIP_VEGAM:
case CHIP_CARRIZO:
case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
@@ -1428,9 +1476,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
case CHIP_TOPAZ:
case CHIP_TONGA:
case CHIP_FIJI:
- case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
case CHIP_POLARIS12:
+ case CHIP_VEGAM:
case CHIP_CARRIZO:
case CHIP_STONEY:
if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
@@ -1499,6 +1548,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
return -EAGAIN;
}
+ adev->powerplay.pp_feature = amdgpu_pp_feature_mask;
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_ERROR("disabled ip block: %d <%s>\n",
@@ -1654,6 +1705,10 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
if (amdgpu_emu_mode == 1)
return 0;
+ r = amdgpu_ib_ring_tests(adev);
+ if (r)
+ DRM_ERROR("ib ring test failed (%d).\n", r);
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -1704,8 +1759,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
}
}
- mod_delayed_work(system_wq, &adev->late_init_work,
- msecs_to_jiffies(AMDGPU_RESUME_MS));
+ queue_delayed_work(system_wq, &adev->late_init_work,
+ msecs_to_jiffies(AMDGPU_RESUME_MS));
amdgpu_device_fill_reset_magic(adev);
@@ -1850,6 +1905,12 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_request_full_gpu(adev, false);
+ /* ungate SMC block powergating */
+ if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+ amdgpu_device_ip_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_SMC,
+ AMD_PG_STATE_UNGATE);
+
/* ungate SMC block first */
r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
AMD_CG_STATE_UNGATE);
@@ -2086,14 +2147,12 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
case CHIP_MULLINS:
case CHIP_CARRIZO:
case CHIP_STONEY:
- case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
case CHIP_POLARIS12:
+ case CHIP_VEGAM:
case CHIP_TONGA:
case CHIP_FIJI:
-#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
- return amdgpu_dc != 0;
-#endif
case CHIP_VEGA10:
case CHIP_VEGA12:
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
@@ -2375,10 +2434,6 @@ fence_driver_init:
goto failed;
}
- r = amdgpu_ib_ring_tests(adev);
- if (r)
- DRM_ERROR("ib ring test failed (%d).\n", r);
-
if (amdgpu_sriov_vf(adev))
amdgpu_virt_init_data_exchange(adev);
@@ -2539,7 +2594,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
/* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
+ struct drm_framebuffer *fb = crtc->primary->fb;
struct amdgpu_bo *robj;
if (amdgpu_crtc->cursor_bo) {
@@ -2551,10 +2606,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
}
}
- if (rfb == NULL || rfb->obj == NULL) {
+ if (fb == NULL || fb->obj[0] == NULL) {
continue;
}
- robj = gem_to_amdgpu_bo(rfb->obj);
+ robj = gem_to_amdgpu_bo(fb->obj[0]);
/* don't unpin kernel fb objects */
if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
r = amdgpu_bo_reserve(robj, true);
@@ -2640,11 +2695,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
}
amdgpu_fence_driver_resume(adev);
- if (resume) {
- r = amdgpu_ib_ring_tests(adev);
- if (r)
- DRM_ERROR("ib ring test failed (%d).\n", r);
- }
r = amdgpu_device_ip_late_init(adev);
if (r)
@@ -2736,6 +2786,9 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return true;
+ if (amdgpu_asic_need_full_reset(adev))
+ return true;
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -2792,6 +2845,9 @@ static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
{
int i;
+ if (amdgpu_asic_need_full_reset(adev))
+ return true;
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -3087,20 +3143,19 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
/* now we are okay to resume SMC/CP/SDMA */
r = amdgpu_device_ip_reinit_late_sriov(adev);
- amdgpu_virt_release_full_gpu(adev, true);
if (r)
goto error;
amdgpu_irq_gpu_reset_resume_helper(adev);
r = amdgpu_ib_ring_tests(adev);
+error:
+ amdgpu_virt_release_full_gpu(adev, true);
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
atomic_inc(&adev->vram_lost_counter);
r = amdgpu_device_handle_vram_lost(adev);
}
-error:
-
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 93f700a..76ee8e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,6 +35,7 @@
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
static void amdgpu_display_flip_callback(struct dma_fence *f,
@@ -151,8 +152,6 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_framebuffer *old_amdgpu_fb;
- struct amdgpu_framebuffer *new_amdgpu_fb;
struct drm_gem_object *obj;
struct amdgpu_flip_work *work;
struct amdgpu_bo *new_abo;
@@ -174,15 +173,13 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
/* schedule unpin of the old buffer */
- old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
- obj = old_amdgpu_fb->obj;
+ obj = crtc->primary->fb->obj[0];
/* take a reference to the old object */
work->old_abo = gem_to_amdgpu_bo(obj);
amdgpu_bo_ref(work->old_abo);
- new_amdgpu_fb = to_amdgpu_framebuffer(fb);
- obj = new_amdgpu_fb->obj;
+ obj = fb->obj[0];
new_abo = gem_to_amdgpu_bo(obj);
/* pin the new buffer */
@@ -192,7 +189,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto cleanup;
}
- r = amdgpu_bo_pin(new_abo, amdgpu_display_framebuffer_domains(adev), &base);
+ r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev), &base);
if (unlikely(r != 0)) {
DRM_ERROR("failed to pin new abo buffer before flip\n");
goto unreserve;
@@ -482,31 +479,12 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
return true;
}
-static void amdgpu_display_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
-
- drm_gem_object_put_unlocked(amdgpu_fb->obj);
- drm_framebuffer_cleanup(fb);
- kfree(amdgpu_fb);
-}
-
-static int amdgpu_display_user_framebuffer_create_handle(
- struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
-
- return drm_gem_handle_create(file_priv, amdgpu_fb->obj, handle);
-}
-
static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
- .destroy = amdgpu_display_user_framebuffer_destroy,
- .create_handle = amdgpu_display_user_framebuffer_create_handle,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
};
-uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev)
+uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev)
{
uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
@@ -526,11 +504,11 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
struct drm_gem_object *obj)
{
int ret;
- rfb->obj = obj;
+ rfb->base.obj[0] = obj;
drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (ret) {
- rfb->obj = NULL;
+ rfb->base.obj[0] = NULL;
return ret;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index 2b11d80..f66e3e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -23,7 +23,7 @@
#ifndef __AMDGPU_DISPLAY_H__
#define __AMDGPU_DISPLAY_H__
-uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev);
+uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev);
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index e997ebbe43..def1010 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -115,6 +115,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
pr_cont("\n");
}
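+/**
+ * amdgpu_dpm_get_active_displays - update the active CRTC bookkeeping
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Walks the mode config CRTC list and records the enabled CRTCs in
+ * adev->pm.dpm.new_active_crtcs (bitmask) and new_active_crtc_count.
+ */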
+void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
+{
+ struct drm_device *ddev = adev->ddev;
+ struct drm_crtc *crtc;
+ struct amdgpu_crtc *amdgpu_crtc;
+
+ adev->pm.dpm.new_active_crtcs = 0;
+ adev->pm.dpm.new_active_crtc_count = 0;
+ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc,
+ &ddev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+ if (amdgpu_crtc->enabled) {
+ adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
+ adev->pm.dpm.new_active_crtc_count++;
+ }
+ }
+ }
+}
+
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 643d008..dd6203a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -52,8 +52,6 @@ enum amdgpu_dpm_event_src {
AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
};
-#define SCLK_DEEP_SLEEP_MASK 0x8
-
struct amdgpu_ps {
u32 caps; /* vbios flags */
u32 class; /* vbios flags */
@@ -349,12 +347,6 @@ enum amdgpu_pcie_gen {
((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
(adev)->powerplay.pp_handle, msg_id))
-#define amdgpu_dpm_notify_smu_memory_info(adev, virtual_addr_low, \
- virtual_addr_hi, mc_addr_low, mc_addr_hi, size) \
- ((adev)->powerplay.pp_funcs->notify_smu_memory_info)( \
- (adev)->powerplay.pp_handle, virtual_addr_low, \
- virtual_addr_hi, mc_addr_low, mc_addr_hi, size)
-
#define amdgpu_dpm_get_power_profile_mode(adev, buf) \
((adev)->powerplay.pp_funcs->get_power_profile_mode(\
(adev)->powerplay.pp_handle, buf))
@@ -445,6 +437,8 @@ struct amdgpu_pm {
uint32_t pcie_gen_mask;
uint32_t pcie_mlw_mask;
struct amd_pp_display_configuration pm_display_cfg;/* set by dc */
+ uint32_t smu_prv_buffer_size;
+ struct amdgpu_bo *smu_prv_buffer;
};
#define R600_SSTU_DFLT 0
@@ -482,6 +476,7 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
struct amdgpu_ps *rps);
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
+void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
bool amdgpu_is_uvd_state(u32 class, u32 class2);
void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
u32 *p, u32 *u);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0b19482..739e7e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -75,9 +75,10 @@
* - 3.23.0 - Add query for VRAM lost counter
* - 3.24.0 - Add high priority compute support for gfx9
* - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
+ * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 25
+#define KMS_DRIVER_MINOR 26
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -121,7 +122,7 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
-uint amdgpu_pp_feature_mask = 0xffffbfff;
+uint amdgpu_pp_feature_mask = 0xffff3fff; /* gfxoff (bit 15) disabled by default */
int amdgpu_ngg = 0;
int amdgpu_prim_buf_per_se = 0;
int amdgpu_pos_buf_per_se = 0;
@@ -132,6 +133,7 @@ int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1;
int amdgpu_gpu_recovery = -1; /* auto */
int amdgpu_emu_mode = 0;
+uint amdgpu_smu_memory_pool_size = 0;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -316,6 +318,11 @@ MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)
module_param_named(cik_support, amdgpu_cik_support, int, 0444);
#endif
+MODULE_PARM_DESC(smu_memory_pool_size,
+ "reserve gtt for smu debug usage, 0 = disable,"
+ "0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
+module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444);
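+/*
+ * Illustrative usage of the option above (a sketch; values as documented):
+ *   modprobe amdgpu smu_memory_pool_size=0x4   # reserve a 1 GB pool
+ */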
+
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -534,6 +541,9 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ /* VEGAM */
+ {0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
+ {0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
/* Vega 10 */
{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 1206301..bc5fd8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -137,7 +137,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
/* need to align pitch with crtc limits */
mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
fb_tiled);
- domain = amdgpu_display_framebuffer_domains(adev);
+ domain = amdgpu_display_supported_domains(adev);
height = ALIGN(mode_cmd->height, 8);
size = mode_cmd->pitches[0] * height;
@@ -292,9 +292,9 @@ static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfb
drm_fb_helper_unregister_fbi(&rfbdev->helper);
- if (rfb->obj) {
- amdgpufb_destroy_pinned_object(rfb->obj);
- rfb->obj = NULL;
+ if (rfb->base.obj[0]) {
+ amdgpufb_destroy_pinned_object(rfb->base.obj[0]);
+ rfb->base.obj[0] = NULL;
drm_framebuffer_unregister_private(&rfb->base);
drm_framebuffer_cleanup(&rfb->base);
}
@@ -377,7 +377,7 @@ int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
if (!adev->mode_info.rfbdev)
return 0;
- robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj);
+ robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]);
size += amdgpu_bo_size(robj);
return size;
}
@@ -386,7 +386,7 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
{
if (!adev->mode_info.rfbdev)
return false;
- if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj))
+ if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]))
return true;
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 97449e0..d09fcab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -131,7 +131,8 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
+ unsigned flags)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_fence *fence;
@@ -149,7 +150,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
adev->fence_context + ring->idx,
seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
- seq, AMDGPU_FENCE_FLAG_INT);
+ seq, flags | AMDGPU_FENCE_FLAG_INT);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
/* This function can't be called concurrently anyway, otherwise
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index cf0f186..17d6b9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -113,12 +113,17 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
int r;
if (adev->gart.robj == NULL) {
- r = amdgpu_bo_create(adev, adev->gart.table_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- ttm_bo_type_kernel, NULL,
- &adev->gart.robj);
+ struct amdgpu_bo_param bp;
+
+ memset(&bp, 0, sizeof(bp));
+ bp.size = adev->gart.table_size;
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+ bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = NULL;
+ r = amdgpu_bo_create(adev, &bp, &adev->gart.robj);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 46b9ea4..2c8e273 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -48,17 +48,25 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
struct drm_gem_object **obj)
{
struct amdgpu_bo *bo;
+ struct amdgpu_bo_param bp;
int r;
+ memset(&bp, 0, sizeof(bp));
*obj = NULL;
/* At least align on page size */
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
+ bp.size = size;
+ bp.byte_align = alignment;
+ bp.type = type;
+ bp.resv = resv;
+ bp.preferred_domain = initial_domain;
retry:
- r = amdgpu_bo_create(adev, size, alignment, initial_domain,
- flags, type, resv, &bo);
+ bp.flags = flags;
+ bp.domain = initial_domain;
+ r = amdgpu_bo_create(adev, &bp, &bo);
if (r) {
if (r != -ERESTARTSYS) {
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
@@ -221,12 +229,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
/* reject invalid gem domains */
- if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
- AMDGPU_GEM_DOMAIN_GTT |
- AMDGPU_GEM_DOMAIN_VRAM |
- AMDGPU_GEM_DOMAIN_GDS |
- AMDGPU_GEM_DOMAIN_GWS |
- AMDGPU_GEM_DOMAIN_OA))
+ if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
return -EINVAL;
/* create a gem object to contain this object in */
@@ -771,16 +774,23 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
}
#if defined(CONFIG_DEBUG_FS)
+
+#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag) \
+ do { \
+ if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) \
+ seq_printf((m), " " #flag); \
+ } while (0)
+
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
struct drm_gem_object *gobj = ptr;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
struct seq_file *m = data;
+ struct dma_buf_attachment *attachment;
+ struct dma_buf *dma_buf;
unsigned domain;
const char *placement;
unsigned pin_count;
- uint64_t offset;
domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
switch (domain) {
@@ -798,13 +808,27 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
seq_printf(m, "\t0x%08x: %12ld byte %s",
id, amdgpu_bo_size(bo), placement);
- offset = READ_ONCE(bo->tbo.mem.start);
- if (offset != AMDGPU_BO_INVALID_OFFSET)
- seq_printf(m, " @ 0x%010Lx", offset);
-
pin_count = READ_ONCE(bo->pin_count);
if (pin_count)
seq_printf(m, " pin count %d", pin_count);
+
+ dma_buf = READ_ONCE(bo->gem_base.dma_buf);
+ attachment = READ_ONCE(bo->gem_base.import_attach);
+
+ if (attachment)
+ seq_printf(m, " imported from %p", dma_buf);
+ else if (dma_buf)
+ seq_printf(m, " exported as %p", dma_buf);
+
+ amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
+ amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
+ amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
+ amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
+ amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
+ amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
+ amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
+ amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);
+
seq_printf(m, "\n");
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 311589e..f70eeed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -127,6 +127,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_vm *vm;
uint64_t fence_ctx;
uint32_t status = 0, alloc_size;
+ unsigned fence_flags = 0;
unsigned i;
int r = 0;
@@ -227,7 +228,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
#endif
amdgpu_asic_invalidate_hdp(adev, ring);
- r = amdgpu_fence_emit(ring, f);
+ if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
+ fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
+
+ r = amdgpu_fence_emit(ring, f, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vmid)
@@ -242,7 +246,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
/* wrap the last IB with fence */
if (job && job->uf_addr) {
amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
- AMDGPU_FENCE_FLAG_64BIT);
+ fence_flags | AMDGPU_FENCE_FLAG_64BIT);
}
if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 4b7824d..eb4785e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -31,6 +31,7 @@
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
+#include "atom.h"
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
@@ -214,6 +215,18 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->ver = adev->gfx.rlc_fw_version;
fw_info->feature = adev->gfx.rlc_feature_version;
break;
+ case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
+ fw_info->ver = adev->gfx.rlc_srlc_fw_version;
+ fw_info->feature = adev->gfx.rlc_srlc_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
+ fw_info->ver = adev->gfx.rlc_srlg_fw_version;
+ fw_info->feature = adev->gfx.rlc_srlg_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
+ fw_info->ver = adev->gfx.rlc_srls_fw_version;
+ fw_info->feature = adev->gfx.rlc_srls_feature_version;
+ break;
case AMDGPU_INFO_FW_GFX_MEC:
if (query_fw->index == 0) {
fw_info->ver = adev->gfx.mec_fw_version;
@@ -279,6 +292,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (!info->return_size || !info->return_pointer)
return -EINVAL;
+ /* Ensure IB tests are run on ring */
+ flush_delayed_work(&adev->late_init_work);
+
switch (info->query) {
case AMDGPU_INFO_ACCEL_WORKING:
ui32 = adev->accel_working;
@@ -701,10 +717,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
}
case AMDGPU_INFO_SENSOR: {
- struct pp_gpu_power query = {0};
- int query_size = sizeof(query);
-
- if (amdgpu_dpm == 0)
+ if (!adev->pm.dpm_enabled)
return -ENOENT;
switch (info->sensor_info.type) {
@@ -746,10 +759,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
/* get average GPU power */
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_GPU_POWER,
- (void *)&query, &query_size)) {
+ (void *)&ui32, &ui32_size)) {
return -EINVAL;
}
- ui32 = query.average_gpu_power >> 8;
+ ui32 >>= 8;
break;
case AMDGPU_INFO_SENSOR_VDDNB:
/* get VDDNB in millivolts */
@@ -913,8 +926,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
return;
pm_runtime_get_sync(dev->dev);
-
- amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+ amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
if (adev->asic_type != CHIP_RAVEN) {
amdgpu_uvd_free_handles(adev, file_priv);
@@ -935,6 +947,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
amdgpu_vm_fini(adev, &fpriv->vm);
+ amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+
if (pasid)
amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
amdgpu_bo_unref(&pd);
@@ -1088,6 +1102,7 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
struct amdgpu_device *adev = dev->dev_private;
struct drm_amdgpu_info_firmware fw_info;
struct drm_amdgpu_query_fw query_fw;
+ struct atom_context *ctx = adev->mode_info.atom_context;
int ret, i;
/* VCE */
@@ -1146,6 +1161,30 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
+ /* RLC SAVE RESTORE LIST CNTL */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* RLC SAVE RESTORE LIST GPM MEM */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* RLC SAVE RESTORE LIST SRM MEM */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
/* MEC */
query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
query_fw.index = 0;
@@ -1210,6 +1249,9 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
+
+ seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index d6416ee..b9e9e8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -308,7 +308,6 @@ struct amdgpu_display_funcs {
struct amdgpu_framebuffer {
struct drm_framebuffer base;
- struct drm_gem_object *obj;
/* caching for later use */
uint64_t address;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 6d08cde..6a9e46a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -191,14 +191,21 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
u32 domain, struct amdgpu_bo **bo_ptr,
u64 *gpu_addr, void **cpu_addr)
{
+ struct amdgpu_bo_param bp;
bool free = false;
int r;
+ memset(&bp, 0, sizeof(bp));
+ bp.size = size;
+ bp.byte_align = align;
+ bp.domain = domain;
+ bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = NULL;
+
if (!*bo_ptr) {
- r = amdgpu_bo_create(adev, size, align, domain,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- ttm_bo_type_kernel, NULL, bo_ptr);
+ r = amdgpu_bo_create(adev, &bp, bo_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
r);
@@ -341,27 +348,25 @@ fail:
return false;
}
-static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
- int byte_align, u32 domain,
- u64 flags, enum ttm_bo_type type,
- struct reservation_object *resv,
+static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
struct amdgpu_bo **bo_ptr)
{
struct ttm_operation_ctx ctx = {
- .interruptible = (type != ttm_bo_type_kernel),
+ .interruptible = (bp->type != ttm_bo_type_kernel),
.no_wait_gpu = false,
- .resv = resv,
+ .resv = bp->resv,
.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
};
struct amdgpu_bo *bo;
- unsigned long page_align;
+ unsigned long page_align, size = bp->size;
size_t acc_size;
int r;
- page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+ page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
size = ALIGN(size, PAGE_SIZE);
- if (!amdgpu_bo_validate_size(adev, size, domain))
+ if (!amdgpu_bo_validate_size(adev, size, bp->domain))
return -ENOMEM;
*bo_ptr = NULL;
@@ -375,18 +380,14 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
INIT_LIST_HEAD(&bo->shadow_list);
INIT_LIST_HEAD(&bo->va);
- bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
- AMDGPU_GEM_DOMAIN_GTT |
- AMDGPU_GEM_DOMAIN_CPU |
- AMDGPU_GEM_DOMAIN_GDS |
- AMDGPU_GEM_DOMAIN_GWS |
- AMDGPU_GEM_DOMAIN_OA);
+ bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
+ bp->domain;
bo->allowed_domains = bo->preferred_domains;
- if (type != ttm_bo_type_kernel &&
+ if (bp->type != ttm_bo_type_kernel &&
bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
- bo->flags = flags;
+ bo->flags = bp->flags;
#ifdef CONFIG_X86_32
/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
@@ -417,11 +418,13 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
#endif
bo->tbo.bdev = &adev->mman.bdev;
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_ttm_placement_from_domain(bo, bp->domain);
+ if (bp->type == ttm_bo_type_kernel)
+ bo->tbo.priority = 1;
- r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
+ r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
&bo->placement, page_align, &ctx, acc_size,
- NULL, resv, &amdgpu_ttm_bo_destroy);
+ NULL, bp->resv, &amdgpu_ttm_bo_destroy);
if (unlikely(r != 0))
return r;
@@ -433,10 +436,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
else
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
- if (type == ttm_bo_type_kernel)
- bo->tbo.priority = 1;
-
- if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
+ if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
struct dma_fence *fence;
@@ -449,20 +449,20 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
bo->tbo.moving = dma_fence_get(fence);
dma_fence_put(fence);
}
- if (!resv)
+ if (!bp->resv)
amdgpu_bo_unreserve(bo);
*bo_ptr = bo;
trace_amdgpu_bo_create(bo);
/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
- if (type == ttm_bo_type_device)
+ if (bp->type == ttm_bo_type_device)
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
return 0;
fail_unreserve:
- if (!resv)
+ if (!bp->resv)
ww_mutex_unlock(&bo->tbo.resv->lock);
amdgpu_bo_unref(&bo);
return r;
@@ -472,16 +472,22 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
unsigned long size, int byte_align,
struct amdgpu_bo *bo)
{
+ struct amdgpu_bo_param bp;
int r;
if (bo->shadow)
return 0;
- r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_SHADOW,
- ttm_bo_type_kernel,
- bo->tbo.resv, &bo->shadow);
+ memset(&bp, 0, sizeof(bp));
+ bp.size = size;
+ bp.byte_align = byte_align;
+ bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+ bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+ AMDGPU_GEM_CREATE_SHADOW;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = bo->tbo.resv;
+
+ r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
if (!r) {
bo->shadow->parent = amdgpu_bo_ref(bo);
mutex_lock(&adev->shadow_list_lock);
@@ -492,28 +498,26 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
return r;
}
-int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
- int byte_align, u32 domain,
- u64 flags, enum ttm_bo_type type,
- struct reservation_object *resv,
+int amdgpu_bo_create(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
struct amdgpu_bo **bo_ptr)
{
- uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
+ u64 flags = bp->flags;
int r;
- r = amdgpu_bo_do_create(adev, size, byte_align, domain,
- parent_flags, type, resv, bo_ptr);
+ bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
+ r = amdgpu_bo_do_create(adev, bp, bo_ptr);
if (r)
return r;
if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
- if (!resv)
+ if (!bp->resv)
WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
NULL));
- r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
+ r = amdgpu_bo_create_shadow(adev, bp->size, bp->byte_align, (*bo_ptr));
- if (!resv)
+ if (!bp->resv)
reservation_object_unlock((*bo_ptr)->tbo.resv);
if (r)
@@ -689,8 +693,21 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return -EINVAL;
/* A shared bo cannot be migrated to VRAM */
- if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
- return -EINVAL;
+ if (bo->prime_shared_count) {
+ if (domain & AMDGPU_GEM_DOMAIN_GTT)
+ domain = AMDGPU_GEM_DOMAIN_GTT;
+ else
+ return -EINVAL;
+ }
+
+ /* This assumes only APU display buffers are pinned with (VRAM|GTT).
+ * See function amdgpu_display_supported_domains()
+ */
+ if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
+ domain = AMDGPU_GEM_DOMAIN_VRAM;
+ if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
+ domain = AMDGPU_GEM_DOMAIN_GTT;
+ }
if (bo->pin_count) {
uint32_t mem_type = bo->tbo.mem.mem_type;
@@ -838,6 +855,13 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
return amdgpu_ttm_init(adev);
}
+int amdgpu_bo_late_init(struct amdgpu_device *adev)
+{
+ amdgpu_ttm_late_init(adev);
+
+ return 0;
+}
+
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
amdgpu_ttm_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 546f77cb7..540e03f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -33,6 +33,16 @@
#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
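+/**
+ * struct amdgpu_bo_param - parameters used by amdgpu_bo_create()
+ * @size: buffer size in bytes
+ * @byte_align: requested byte alignment
+ * @domain: initial placement domain(s)
+ * @preferred_domain: preferred domains for validation; 0 means use @domain
+ * @flags: AMDGPU_GEM_CREATE_* flags
+ * @type: kernel, device or sg buffer object (enum ttm_bo_type)
+ * @resv: reservation object to reuse, or NULL to allocate a new one
+ */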
+struct amdgpu_bo_param {
+ unsigned long size;
+ int byte_align;
+ u32 domain;
+ u32 preferred_domain;
+ u64 flags;
+ enum ttm_bo_type type;
+ struct reservation_object *resv;
+};
+
/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
struct amdgpu_bo_va *bo_va;
@@ -196,6 +206,27 @@ static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
}
/**
+ * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
+ */
+static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
+ struct drm_mm_node *node = bo->tbo.mem.mm_node;
+ unsigned long pages_left;
+
+ if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
+ return false;
+
+ for (pages_left = bo->tbo.mem.num_pages; pages_left;
+ pages_left -= node->size, node++)
+ if (node->start < fpfn)
+ return true;
+
+ return false;
+}
+
+/**
* amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
*/
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
@@ -203,10 +234,8 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}
-int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
- int byte_align, u32 domain,
- u64 flags, enum ttm_bo_type type,
- struct reservation_object *resv,
+int amdgpu_bo_create(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
unsigned long size, int align,
@@ -230,6 +259,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
+int amdgpu_bo_late_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 361975c..b455da4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -77,6 +77,37 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
}
}
+/**
+ * DOC: power_dpm_state
+ *
+ * This is a legacy interface and is only provided for backwards compatibility.
+ * The amdgpu driver provides a sysfs API for adjusting certain power
+ * related parameters. The file power_dpm_state is used for this.
+ * It accepts the following arguments:
+ * - battery
+ * - balanced
+ * - performance
+ *
+ * battery
+ *
+ * On older GPUs, the vbios provided a special power state for battery
+ * operation. Selecting battery switched to this state. This is no
+ * longer provided on newer GPUs so the option does nothing in that case.
+ *
+ * balanced
+ *
+ * On older GPUs, the vbios provided a special power state for balanced
+ * operation. Selecting balanced switched to this state. This is no
+ * longer provided on newer GPUs so the option does nothing in that case.
+ *
+ * performance
+ *
+ * On older GPUs, the vbios provided a special power state for performance
+ * operation. Selecting performance switched to this state. This is no
+ * longer provided on newer GPUs so the option does nothing in that case.
+ *
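+ * A usage sketch (the card index is system dependent):
+ *   echo performance > /sys/class/drm/card0/device/power_dpm_state
+ *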
+ */
+
static ssize_t amdgpu_get_dpm_state(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -131,6 +162,59 @@ fail:
return count;
}
+
+/**
+ * DOC: power_dpm_force_performance_level
+ *
+ * The amdgpu driver provides a sysfs API for adjusting certain power
+ * related parameters. The file power_dpm_force_performance_level is
+ * used for this. It accepts the following arguments:
+ * - auto
+ * - low
+ * - high
+ * - manual
+ * - profile_standard
+ * - profile_min_sclk
+ * - profile_min_mclk
+ * - profile_peak
+ *
+ * auto
+ *
+ * When auto is selected, the driver will attempt to dynamically select
+ * the optimal power profile for the current conditions.
+ *
+ * low
+ *
+ * When low is selected, the clocks are forced to the lowest power state.
+ *
+ * high
+ *
+ * When high is selected, the clocks are forced to the highest power state.
+ *
+ * manual
+ *
+ * When manual is selected, the user can manually adjust which power states
+ * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
+ * and pp_dpm_pcie files and adjust the power state transition heuristics
+ * via the pp_power_profile_mode sysfs file.
+ *
+ * profile_standard
+ * profile_min_sclk
+ * profile_min_mclk
+ * profile_peak
+ *
+ * When the profiling modes are selected, clock and power gating are
+ * disabled and the clocks are set for different profiling cases. This
+ * mode is recommended for profiling specific workloads where you do
+ * not want clock or power gating, or clock fluctuations, to interfere
+ * with your results.
+ * level which varies from asic to asic. profile_min_sclk forces the sclk
+ * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
+ * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
+ *
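+ * For example (path assumes the device is card0):
+ *   echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
+ *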
+ */
+
static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -324,6 +408,17 @@ fail:
return count;
}
+/**
+ * DOC: pp_table
+ *
+ * The amdgpu driver provides a sysfs API for uploading new powerplay
+ * tables. The file pp_table is used for this. Reading the file
+ * will dump the current power play table. Writing to the file
+ * will attempt to upload a new powerplay table and re-initialize
+ * powerplay using that new table.
+ *
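+ * A usage sketch with illustrative file names:
+ *   cat pp_table > /tmp/pp_table.bin      # dump the active table
+ *   cat /tmp/new_table.bin > pp_table     # upload and re-initialize
+ *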
+ */
+
static ssize_t amdgpu_get_pp_table(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -360,6 +455,29 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
return count;
}
+/**
+ * DOC: pp_od_clk_voltage
+ *
+ * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
+ * in each power level within a power state. The pp_od_clk_voltage is used for
+ * this.
+ *
+ * Reading the file will display:
+ * - a list of engine clock levels and voltages labeled OD_SCLK
+ * - a list of memory clock levels and voltages labeled OD_MCLK
+ * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
+ *
+ * To manually adjust these settings, first select manual using
+ * power_dpm_force_performance_level. Enter a new value for each
+ * level by writing a string that contains "s/m level clock voltage" to
+ * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
+ * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
+ * 810 mV. When you have edited all of the states as needed, write
+ * "c" (commit) to the file to commit your changes. If you want to reset to the
+ * default power levels, write "r" (reset) to the file to reset them.
+ *
+ */
+
static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -437,6 +555,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
if (adev->powerplay.pp_funcs->print_clock_levels) {
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
return size;
} else {
return snprintf(buf, PAGE_SIZE, "\n");
@@ -444,6 +563,23 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
}
+/**
+ * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
+ *
+ * The amdgpu driver provides a sysfs API for adjusting what power levels
+ * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
+ * and pp_dpm_pcie are used for this.
+ *
+ * Reading back the files will show you the available power levels within
+ * the power state and the clock information for those levels.
+ *
+ * To manually adjust these states, first select manual using
+ * power_dpm_force_performance_level.
+ * Then write a space-separated list of the level indices to enable to the
+ * relevant file, e.g. "echo 4 5 6 > pp_dpm_sclk" will enable sclk levels
+ * 4, 5, and 6.
+ */
+
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -466,23 +602,27 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret;
long level;
- uint32_t i, mask = 0;
- char sub_str[2];
+ uint32_t mask = 0;
+ char *sub_str = NULL;
+ char *tmp;
+ char buf_cpy[count + 1];
+ const char delimiter[3] = {' ', '\n', '\0'};
- for (i = 0; i < strlen(buf); i++) {
- if (*(buf + i) == '\n')
- continue;
- sub_str[0] = *(buf + i);
- sub_str[1] = '\0';
- ret = kstrtol(sub_str, 0, &level);
+ memcpy(buf_cpy, buf, count+1);
+ tmp = buf_cpy;
+ while (tmp[0]) {
+ sub_str = strsep(&tmp, delimiter);
+ if (strlen(sub_str)) {
+ ret = kstrtol(sub_str, 0, &level);
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- mask |= 1 << level;
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+ mask |= 1 << level;
+ } else
+ break;
}
-
if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
@@ -512,21 +652,26 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret;
long level;
- uint32_t i, mask = 0;
- char sub_str[2];
+ uint32_t mask = 0;
+ char *sub_str = NULL;
+ char *tmp;
+ char buf_cpy[count + 1];
+ const char delimiter[3] = {' ', '\n', '\0'};
- for (i = 0; i < strlen(buf); i++) {
- if (*(buf + i) == '\n')
- continue;
- sub_str[0] = *(buf + i);
- sub_str[1] = '\0';
- ret = kstrtol(sub_str, 0, &level);
+ memcpy(buf_cpy, buf, count+1);
+ tmp = buf_cpy;
+ while (tmp[0]) {
+ sub_str = strsep(&tmp, delimiter);
+ if (strlen(sub_str)) {
+ ret = kstrtol(sub_str, 0, &level);
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- mask |= 1 << level;
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+ mask |= 1 << level;
+ } else
+ break;
}
if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
@@ -557,21 +702,27 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret;
long level;
- uint32_t i, mask = 0;
- char sub_str[2];
+ uint32_t mask = 0;
+ char *sub_str = NULL;
+ char *tmp;
+ char buf_cpy[count + 1];
+ const char delimiter[3] = {' ', '\n', '\0'};
- for (i = 0; i < strlen(buf); i++) {
- if (*(buf + i) == '\n')
- continue;
- sub_str[0] = *(buf + i);
- sub_str[1] = '\0';
- ret = kstrtol(sub_str, 0, &level);
+ memcpy(buf_cpy, buf, count+1);
+ tmp = buf_cpy;
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- mask |= 1 << level;
+ while (tmp[0]) {
+ sub_str = strsep(&tmp, delimiter);
+ if (strlen(sub_str)) {
+ ret = kstrtol(sub_str, 0, &level);
+
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+ mask |= 1 << level;
+ } else
+ break;
}
if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
@@ -668,6 +819,26 @@ fail:
return count;
}
+/**
+ * DOC: pp_power_profile_mode
+ *
+ * The amdgpu driver provides a sysfs API for adjusting the heuristics
+ * related to switching between power levels in a power state. The file
+ * pp_power_profile_mode is used for this.
+ *
+ * Reading this file outputs a list of all of the predefined power profiles
+ * and the relevant heuristics settings for that profile.
+ *
+ * To select a profile or create a custom profile, first select manual using
+ * power_dpm_force_performance_level. Writing the number of a predefined
+ * profile to pp_power_profile_mode will enable those heuristics. To
+ * create a custom set of heuristics, write a string of numbers to the file
+ * starting with the number of the custom profile along with a setting
+ * for each heuristic parameter. Due to differences across asic families
+ * the heuristic parameters vary from family to family.
+ *
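+ * For example, "echo 2 > pp_power_profile_mode" selects the predefined
+ * profile listed with index 2; read the file first to see the per-asic
+ * profile list and heuristic parameters.
+ *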
+ */
+
static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1020,8 +1191,8 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
struct drm_device *ddev = adev->ddev;
- struct pp_gpu_power query = {0};
- int r, size = sizeof(query);
+ u32 query = 0;
+ int r, size = sizeof(u32);
unsigned uw;
/* Can't get power when the card is off */
@@ -1041,7 +1212,7 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
return r;
/* convert to microwatts */
- uw = (query.average_gpu_power >> 8) * 1000000;
+ uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
return snprintf(buf, PAGE_SIZE, "%u\n", uw);
}
@@ -1109,6 +1280,46 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
return count;
}
+
+/**
+ * DOC: hwmon
+ *
+ * The amdgpu driver exposes the following sensor interfaces:
+ * - GPU temperature (via the on-die sensor)
+ * - GPU voltage
+ * - Northbridge voltage (APUs only)
+ * - GPU power
+ * - GPU fan
+ *
+ * hwmon interfaces for GPU temperature:
+ * - temp1_input: the on die GPU temperature in millidegrees Celsius
+ * - temp1_crit: temperature critical max value in millidegrees Celsius
+ * - temp1_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
+ *
+ * hwmon interfaces for GPU voltage:
+ * - in0_input: the voltage on the GPU in millivolts
+ * - in1_input: the voltage on the Northbridge in millivolts
+ *
+ * hwmon interfaces for GPU power:
+ * - power1_average: average power used by the GPU in microWatts
+ * - power1_cap_min: minimum cap supported in microWatts
+ * - power1_cap_max: maximum cap supported in microWatts
+ * - power1_cap: selected power cap in microWatts
+ *
+ * hwmon interfaces for GPU fan:
+ * - pwm1: pulse width modulation fan level (0-255)
+ * - pwm1_enable: pulse width modulation fan control method
+ * 0: no fan speed control
+ * 1: manual fan speed control using pwm interface
+ * 2: automatic fan speed control
+ * - pwm1_min: pulse width modulation fan control minimum level (0)
+ * - pwm1_max: pulse width modulation fan control maximum level (255)
+ * - fan1_input: fan speed in RPM
+ *
+ * You can use hwmon tools like sensors to view this information on your system.
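+ *
+ * For example (the hwmon index varies per system):
+ *   cat /sys/class/hwmon/hwmon0/temp1_input
+ *   echo 1 > /sys/class/hwmon/hwmon0/pwm1_enable   # manual fan control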
+ *
+ */
+
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
@@ -1153,19 +1364,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct amdgpu_device *adev = dev_get_drvdata(dev);
umode_t effective_mode = attr->mode;
- /* handle non-powerplay limitations */
- if (!adev->powerplay.pp_handle) {
- /* Skip fan attributes if fan is not present */
- if (adev->pm.no_fan &&
- (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
- return 0;
- /* requires powerplay */
- if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr)
- return 0;
- }
+
+ /* Skip fan attributes if fan is not present */
+ if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_input.dev_attr.attr))
+ return 0;
/* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled &&
@@ -1658,9 +1864,6 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
- struct drm_device *ddev = adev->ddev;
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
int i = 0;
if (!adev->pm.dpm_enabled)
@@ -1676,21 +1879,25 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
}
if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ if (!amdgpu_device_has_dc_support(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ amdgpu_dpm_get_active_displays(adev);
+ adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
+ adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
+ adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
+ /* we have issues with mclk switching with refresh rates over 120 Hz on the non-DC code. */
+ if (adev->pm.pm_display_cfg.vrefresh > 120)
+ adev->pm.pm_display_cfg.min_vblank_time = 0;
+ if (adev->powerplay.pp_funcs->display_configuration_change)
+ adev->powerplay.pp_funcs->display_configuration_change(
+ adev->powerplay.pp_handle,
+ &adev->pm.pm_display_cfg);
+ mutex_unlock(&adev->pm.mutex);
+ }
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
} else {
mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.new_active_crtcs = 0;
- adev->pm.dpm.new_active_crtc_count = 0;
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (amdgpu_crtc->enabled) {
- adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
- adev->pm.dpm.new_active_crtc_count++;
- }
- }
- }
+ amdgpu_dpm_get_active_displays(adev);
/* update battery/ac status */
if (power_supply_is_system_supplied() > 0)
adev->pm.dpm.ac_power = true;
@@ -1711,7 +1918,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
uint32_t value;
- struct pp_gpu_power query = {0};
+ uint32_t query = 0;
int size;
/* sanity check PP is enabled */
@@ -1734,17 +1941,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
seq_printf(m, "\t%u mV (VDDGFX)\n", value);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
seq_printf(m, "\t%u mV (VDDNB)\n", value);
- size = sizeof(query);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) {
- seq_printf(m, "\t%u.%u W (VDDC)\n", query.vddc_power >> 8,
- query.vddc_power & 0xff);
- seq_printf(m, "\t%u.%u W (VDDCI)\n", query.vddci_power >> 8,
- query.vddci_power & 0xff);
- seq_printf(m, "\t%u.%u W (max GPU)\n", query.max_gpu_power >> 8,
- query.max_gpu_power & 0xff);
- seq_printf(m, "\t%u.%u W (average GPU)\n", query.average_gpu_power >> 8,
- query.average_gpu_power & 0xff);
- }
+ size = sizeof(uint32_t);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
+ seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
size = sizeof(value);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 4b584cb7..4683626 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -102,12 +102,18 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct reservation_object *resv = attach->dmabuf->resv;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_bo *bo;
+ struct amdgpu_bo_param bp;
int ret;
+ memset(&bp, 0, sizeof(bp));
+ bp.size = attach->dmabuf->size;
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_CPU;
+ bp.flags = 0;
+ bp.type = ttm_bo_type_sg;
+ bp.resv = resv;
ww_mutex_lock(&resv->lock, NULL);
- ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_CPU, 0, ttm_bo_type_sg,
- resv, &bo);
+ ret = amdgpu_bo_create(adev, &bp, &bo);
if (ret)
goto error;
@@ -209,7 +215,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_operation_ctx ctx = { true, false };
- u32 domain = amdgpu_display_framebuffer_domains(adev);
+ u32 domain = amdgpu_display_supported_domains(adev);
int ret;
bool reads = (direction == DMA_BIDIRECTIONAL ||
direction == DMA_FROM_DEVICE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index d5f526f..49cad08 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -459,6 +459,26 @@ void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring)
spin_unlock(&adev->ring_lru_list_lock);
}
+/**
+ * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
+ *
+ * @adev: amdgpu_device pointer
+ * @reg0: register to write
+ * @reg1: register to wait on
+ * @ref: reference value to write/wait on
+ * @mask: mask to wait on
+ *
+ * Helper for rings that don't support write and wait in a
+ * single oneshot packet.
+ */
+void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask)
+{
+ amdgpu_ring_emit_wreg(ring, reg0, ref);
+ amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
+}
+
/*
* Debugfs info
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 1a59118..4f8dac2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -42,6 +42,7 @@
#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
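+/* fence is written with TC write-back only, no TC invalidate; set for IBs
+ * carrying AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (see amdgpu_ib_schedule()) */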
+#define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
enum amdgpu_ring_type {
AMDGPU_RING_TYPE_GFX,
@@ -90,7 +91,8 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
+ unsigned flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
@@ -154,6 +156,9 @@ struct amdgpu_ring_funcs {
void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask);
+ void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask);
void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
/* priority functions */
void (*set_priority) (struct amdgpu_ring *ring,
@@ -228,6 +233,10 @@ int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
int *blacklist, int num_blacklist,
bool lru_pipe_order, struct amdgpu_ring **ring);
void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask);
+
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
int i = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 2dbe875..d167e8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -33,6 +33,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_bo *vram_obj = NULL;
struct amdgpu_bo **gtt_obj = NULL;
+ struct amdgpu_bo_param bp;
uint64_t gart_addr, vram_addr;
unsigned n, size;
int i, r;
@@ -58,9 +59,15 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
r = 1;
goto out_cleanup;
}
-
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 0,
- ttm_bo_type_kernel, NULL, &vram_obj);
+ memset(&bp, 0, sizeof(bp));
+ bp.size = size;
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+ bp.flags = 0;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = NULL;
+
+ r = amdgpu_bo_create(adev, &bp, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
@@ -79,9 +86,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
void **vram_start, **vram_end;
struct dma_fence *fence = NULL;
- r = amdgpu_bo_create(adev, size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT, 0,
- ttm_bo_type_kernel, NULL, gtt_obj + i);
+ bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+ r = amdgpu_bo_create(adev, &bp, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 532263a..e96e26d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -275,7 +275,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
),
TP_fast_assign(
- __entry->bo = bo_va->base.bo;
+ __entry->bo = bo_va ? bo_va->base.bo : NULL;
__entry->start = mapping->start;
__entry->last = mapping->last;
__entry->offset = mapping->offset;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c713d30..69a2b25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -111,7 +111,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
ring = adev->mman.buffer_funcs_ring;
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
- rq, amdgpu_sched_jobs, NULL);
+ rq, NULL);
if (r) {
DRM_ERROR("Failed setting up TTM BO move run queue.\n");
goto error_entity;
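
drm_sched_entity_init() dropped its per-entity job-count argument in this series; the remaining trailing pointer is, as far as this series shows, the optional guilty marker, passed as NULL when unused. Every converted call site below follows the same shape:

	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
	r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
				  rq, NULL);
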
@@ -223,20 +223,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
if (!adev->mman.buffer_funcs_enabled) {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
} else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
- !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
- unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
- struct drm_mm_node *node = bo->mem.mm_node;
- unsigned long pages_left;
-
- for (pages_left = bo->mem.num_pages;
- pages_left;
- pages_left -= node->size, node++) {
- if (node->start < fpfn)
- break;
- }
-
- if (!pages_left)
- goto gtt;
+ !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
+ amdgpu_bo_in_cpu_visible_vram(abo)) {
/* Try evicting to the CPU inaccessible part of VRAM
* first, but only set GTT as busy placement, so this
@@ -245,12 +233,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
*/
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT);
- abo->placements[0].fpfn = fpfn;
+ abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
abo->placements[0].lpfn = 0;
abo->placement.busy_placement = &abo->placements[1];
abo->placement.num_busy_placement = 1;
} else {
-gtt:
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
}
break;
@@ -856,6 +843,45 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
sg_free_table(ttm->sg);
}
+int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
+ struct ttm_buffer_object *tbo,
+ uint64_t flags)
+{
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
+ struct ttm_tt *ttm = tbo->ttm;
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ int r;
+
+ if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
+ uint64_t page_idx = 1;
+
+ r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
+ ttm->pages, gtt->ttm.dma_address, flags);
+ if (r)
+ goto gart_bind_fail;
+
+ /* Patch the mtype of the second part of the BO */
+ flags &= ~AMDGPU_PTE_MTYPE_MASK;
+ flags |= AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_NC);
+
+ r = amdgpu_gart_bind(adev,
+ gtt->offset + (page_idx << PAGE_SHIFT),
+ ttm->num_pages - page_idx,
+ &ttm->pages[page_idx],
+ &(gtt->ttm.dma_address[page_idx]), flags);
+ } else {
+ r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+ ttm->pages, gtt->ttm.dma_address, flags);
+ }
+
+gart_bind_fail:
+ if (r)
+ DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+ ttm->num_pages, gtt->offset);
+
+ return r;
+}
+
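
For a GFX9 MQD the first page must keep its original memory type while the rest of the BO is remapped as NC, so the helper above binds page 0, patches the PTE flags, then binds the remainder. Worked out against the AMDGPU_PTE_MTYPE() encoding from amdgpu_vm.h (the MTYPE field occupies PTE bits 58:57):

	/* clear the two MTYPE bits, then select NC (0) */
	flags &= ~AMDGPU_PTE_MTYPE_MASK;		/* ~(3ULL << 57) */
	flags |= AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_NC);	/* 0ULL << 57 */
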
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
@@ -929,8 +955,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
gtt->offset = (u64)tmp.start << PAGE_SHIFT;
- r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages,
- bo->ttm->pages, gtt->ttm.dma_address, flags);
+ r = amdgpu_ttm_gart_bind(adev, bo, flags);
if (unlikely(r)) {
ttm_bo_mem_put(bo, &tmp);
return r;
@@ -947,19 +972,15 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
- struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
uint64_t flags;
int r;
- if (!gtt)
+ if (!tbo->ttm)
return 0;
- flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
- r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
- gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
- if (r)
- DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
- gtt->ttm.ttm.num_pages, gtt->offset);
+ flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
+ r = amdgpu_ttm_gart_bind(adev, tbo, flags);
+
return r;
}
@@ -1349,6 +1370,7 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_bo_param bp;
int r = 0;
int i;
u64 vram_size = adev->gmc.visible_vram_size;
@@ -1356,17 +1378,21 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
u64 size = adev->fw_vram_usage.size;
struct amdgpu_bo *bo;
+ memset(&bp, 0, sizeof(bp));
+ bp.size = adev->fw_vram_usage.size;
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+ bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = NULL;
adev->fw_vram_usage.va = NULL;
adev->fw_vram_usage.reserved_bo = NULL;
if (adev->fw_vram_usage.size > 0 &&
adev->fw_vram_usage.size <= vram_size) {
- r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- ttm_bo_type_kernel, NULL,
+ r = amdgpu_bo_create(adev, &bp,
&adev->fw_vram_usage.reserved_bo);
if (r)
goto error_create;
@@ -1474,12 +1500,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
- r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->stolen_vga_memory,
- NULL, NULL);
- if (r)
- return r;
+ if (adev->gmc.stolen_size) {
+ r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->stolen_vga_memory,
+ NULL, NULL);
+ if (r)
+ return r;
+ }
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
@@ -1548,13 +1576,17 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return 0;
}
+void amdgpu_ttm_late_init(struct amdgpu_device *adev)
+{
+ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
+}
+
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
if (!adev->mman.initialized)
return;
amdgpu_ttm_debugfs_fini(adev);
- amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
amdgpu_ttm_fw_reserve_vram_fini(adev);
if (adev->mman.aper_base_kaddr)
iounmap(adev->mman.aper_base_kaddr);
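
Moving the stolen-VGA free out of amdgpu_ttm_fini() into the new amdgpu_ttm_late_init() lets the pre-OS scanout buffer stay reserved until the display stack has taken over, rather than being released only at teardown. A hedged sketch of a caller; the hook name here is hypothetical and the real wiring lives in the IP-block late-init path:

	static int example_late_init(void *handle)
	{
		struct amdgpu_device *adev = (struct amdgpu_device *)handle;

		/* display now owns scanout; drop the stolen reservation */
		amdgpu_ttm_late_init(adev);
		return 0;
	}
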
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 6ea7de8..e969c87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -77,6 +77,7 @@ uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
int amdgpu_ttm_init(struct amdgpu_device *adev);
+void amdgpu_ttm_late_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
bool enable);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 5916cc2..75592bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -161,8 +161,38 @@ void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
- DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
+ DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
+ if (version_minor == 1) {
+ const struct rlc_firmware_header_v2_1 *v2_1 =
+ container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0);
+ DRM_DEBUG("reg_list_format_direct_reg_list_length: %u\n",
+ le32_to_cpu(v2_1->reg_list_format_direct_reg_list_length));
+ DRM_DEBUG("save_restore_list_cntl_ucode_ver: %u\n",
+ le32_to_cpu(v2_1->save_restore_list_cntl_ucode_ver));
+ DRM_DEBUG("save_restore_list_cntl_feature_ver: %u\n",
+ le32_to_cpu(v2_1->save_restore_list_cntl_feature_ver));
+ DRM_DEBUG("save_restore_list_cntl_size_bytes %u\n",
+ le32_to_cpu(v2_1->save_restore_list_cntl_size_bytes));
+ DRM_DEBUG("save_restore_list_cntl_offset_bytes: %u\n",
+ le32_to_cpu(v2_1->save_restore_list_cntl_offset_bytes));
+ DRM_DEBUG("save_restore_list_gpm_ucode_ver: %u\n",
+ le32_to_cpu(v2_1->save_restore_list_gpm_ucode_ver));
+ DRM_DEBUG("save_restore_list_gpm_feature_ver: %u\n",
+ le32_to_cpu(v2_1->save_restore_list_gpm_feature_ver));
+ DRM_DEBUG("save_restore_list_gpm_size_bytes %u\n",
+ le32_to_cpu(v2_1->save_restore_list_gpm_size_bytes));
+ DRM_DEBUG("save_restore_list_gpm_offset_bytes: %u\n",
+ le32_to_cpu(v2_1->save_restore_list_gpm_offset_bytes));
+ DRM_DEBUG("save_restore_list_srm_ucode_ver: %u\n",
+ le32_to_cpu(v2_1->save_restore_list_srm_ucode_ver));
+ DRM_DEBUG("save_restore_list_srm_feature_ver: %u\n",
+ le32_to_cpu(v2_1->save_restore_list_srm_feature_ver));
+ DRM_DEBUG("save_restore_list_srm_size_bytes %u\n",
+ le32_to_cpu(v2_1->save_restore_list_srm_size_bytes));
+ DRM_DEBUG("save_restore_list_srm_offset_bytes: %u\n",
+ le32_to_cpu(v2_1->save_restore_list_srm_offset_bytes));
+ }
} else {
DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
}
@@ -265,6 +295,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
+ case CHIP_VEGAM:
if (!load_type)
return AMDGPU_FW_LOAD_DIRECT;
else
@@ -307,7 +338,10 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
(ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 &&
ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2 &&
ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1_JT &&
- ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT)) {
+ ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT &&
+ ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL &&
+ ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM &&
+ ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) {
ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
@@ -329,6 +363,18 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
le32_to_cpu(header->ucode_array_offset_bytes) +
le32_to_cpu(cp_hdr->jt_offset) * 4),
ucode->ucode_size);
+ } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) {
+ ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
+ memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl,
+ ucode->ucode_size);
+ } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM) {
+ ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes;
+ memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_gpm,
+ ucode->ucode_size);
+ } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
+ ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
+ memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_srm,
+ ucode->ucode_size);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 30b5500..08e3857 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -98,6 +98,24 @@ struct rlc_firmware_header_v2_0 {
uint32_t reg_list_separate_array_offset_bytes; /* payload offset from the start of the header */
};
+/* version_major=2, version_minor=1 */
+struct rlc_firmware_header_v2_1 {
+ struct rlc_firmware_header_v2_0 v2_0;
+ uint32_t reg_list_format_direct_reg_list_length; /* length of direct reg list format array */
+ uint32_t save_restore_list_cntl_ucode_ver;
+ uint32_t save_restore_list_cntl_feature_ver;
+ uint32_t save_restore_list_cntl_size_bytes;
+ uint32_t save_restore_list_cntl_offset_bytes;
+ uint32_t save_restore_list_gpm_ucode_ver;
+ uint32_t save_restore_list_gpm_feature_ver;
+ uint32_t save_restore_list_gpm_size_bytes;
+ uint32_t save_restore_list_gpm_offset_bytes;
+ uint32_t save_restore_list_srm_ucode_ver;
+ uint32_t save_restore_list_srm_feature_ver;
+ uint32_t save_restore_list_srm_size_bytes;
+ uint32_t save_restore_list_srm_offset_bytes;
+};
+
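
Because v2.1 embeds the v2.0 header as its first member, code holding a v2_0 pointer can recover the extended view with container_of() once the minor version checks out, exactly as the ucode printer above does. A minimal sketch, assuming fw is a loaded struct firmware:

	const struct rlc_firmware_header_v2_0 *v2_0 =
		(const struct rlc_firmware_header_v2_0 *)fw->data;

	if (le16_to_cpu(v2_0->header.header_version_major) == 2 &&
	    le16_to_cpu(v2_0->header.header_version_minor) == 1) {
		const struct rlc_firmware_header_v2_1 *v2_1 =
			container_of(v2_0, struct rlc_firmware_header_v2_1, v2_0);

		/* payloads sit at byte offsets from the header base */
		const u8 *srm = (const u8 *)v2_1 +
			le32_to_cpu(v2_1->save_restore_list_srm_offset_bytes);
		/* srm is then copied into the ucode BO at load time */
	}
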
/* version_major=1, version_minor=0 */
struct sdma_firmware_header_v1_0 {
struct common_firmware_header header;
@@ -148,6 +166,7 @@ union amdgpu_firmware_header {
struct gfx_firmware_header_v1_0 gfx;
struct rlc_firmware_header_v1_0 rlc;
struct rlc_firmware_header_v2_0 rlc_v2_0;
+ struct rlc_firmware_header_v2_1 rlc_v2_1;
struct sdma_firmware_header_v1_0 sdma;
struct sdma_firmware_header_v1_1 sdma_v1_1;
struct gpu_info_firmware_header_v1_0 gpu_info;
@@ -168,6 +187,9 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_CP_MEC2,
AMDGPU_UCODE_ID_CP_MEC2_JT,
AMDGPU_UCODE_ID_RLC_G,
+ AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL,
+ AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM,
+ AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM,
AMDGPU_UCODE_ID_STORAGE,
AMDGPU_UCODE_ID_SMC,
AMDGPU_UCODE_ID_UVD,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 627542b..de4d77a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -66,6 +66,7 @@
#define FIRMWARE_POLARIS10 "amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11 "amdgpu/polaris11_uvd.bin"
#define FIRMWARE_POLARIS12 "amdgpu/polaris12_uvd.bin"
+#define FIRMWARE_VEGAM "amdgpu/vegam_uvd.bin"
#define FIRMWARE_VEGA10 "amdgpu/vega10_uvd.bin"
#define FIRMWARE_VEGA12 "amdgpu/vega12_uvd.bin"
@@ -109,6 +110,7 @@ MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
+MODULE_FIRMWARE(FIRMWARE_VEGAM);
MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
@@ -172,6 +174,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
case CHIP_VEGA12:
fw_name = FIRMWARE_VEGA12;
break;
+ case CHIP_VEGAM:
+ fw_name = FIRMWARE_VEGAM;
+ break;
default:
return -EINVAL;
}
@@ -237,7 +242,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
ring = &adev->uvd.ring;
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity,
- rq, amdgpu_sched_jobs, NULL);
+ rq, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up UVD run queue.\n");
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index a33804b..a86322f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -51,8 +51,9 @@
#define FIRMWARE_FIJI "amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY "amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10 "amdgpu/polaris10_vce.bin"
-#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin"
-#define FIRMWARE_POLARIS12 "amdgpu/polaris12_vce.bin"
+#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin"
+#define FIRMWARE_POLARIS12 "amdgpu/polaris12_vce.bin"
+#define FIRMWARE_VEGAM "amdgpu/vegam_vce.bin"
#define FIRMWARE_VEGA10 "amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12 "amdgpu/vega12_vce.bin"
@@ -71,6 +72,7 @@ MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
+MODULE_FIRMWARE(FIRMWARE_VEGAM);
MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
@@ -132,6 +134,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
case CHIP_POLARIS12:
fw_name = FIRMWARE_POLARIS12;
break;
+ case CHIP_VEGAM:
+ fw_name = FIRMWARE_VEGAM;
+ break;
case CHIP_VEGA10:
fw_name = FIRMWARE_VEGA10;
break;
@@ -181,7 +186,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
ring = &adev->vce.ring[0];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
- rq, amdgpu_sched_jobs, NULL);
+ rq, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCE run queue.\n");
return r;
@@ -755,6 +760,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
if (r)
goto out;
break;
+
+ case 0x0500000d: /* MV buffer */
+ r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
+ idx + 2, 0, 0);
+ if (r)
+ goto out;
+
+ r = amdgpu_vce_validate_bo(p, ib_idx, idx + 8,
+ idx + 7, 0, 0);
+ if (r)
+ goto out;
+ break;
}
idx += len / 4;
@@ -860,6 +877,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
goto out;
break;
+ case 0x0500000d: /* MV buffer */
+ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3,
+ idx + 2, *size, 0);
+ if (r)
+ goto out;
+
+ r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 8,
+ idx + 7, *size / 12, 0);
+ if (r)
+ goto out;
+ break;
+
default:
DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
r = -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 58e4953..e5d234c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -105,7 +105,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
ring = &adev->vcn.ring_dec;
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
- rq, amdgpu_sched_jobs, NULL);
+ rq, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCN dec run queue.\n");
return r;
@@ -114,7 +114,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
ring = &adev->vcn.ring_enc[0];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
- rq, amdgpu_sched_jobs, NULL);
+ rq, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCN enc run queue.\n");
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index da55a78..1a8f4e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -94,6 +94,36 @@ struct amdgpu_prt_cb {
struct dma_fence_cb cb;
};
+static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo)
+{
+ base->vm = vm;
+ base->bo = bo;
+ INIT_LIST_HEAD(&base->bo_list);
+ INIT_LIST_HEAD(&base->vm_status);
+
+ if (!bo)
+ return;
+ list_add_tail(&base->bo_list, &bo->va);
+
+ if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
+ return;
+
+ if (bo->preferred_domains &
+ amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
+ return;
+
+ /*
+ * We checked all the prerequisites, but it looks like this per-VM BO
+ * is currently evicted. Add the BO to the evicted list to make sure it
+ * is validated on next VM use to avoid faults.
+ */
+ spin_lock(&vm->status_lock);
+ list_move_tail(&base->vm_status, &vm->evicted);
+ spin_unlock(&vm->status_lock);
+}
+
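
The helper folds together the list setup and the "already evicted?" check that used to be open-coded at each base-init site; the three call sites touched by this patch each reduce to a single line:

	amdgpu_vm_bo_base_init(&entry->base, vm, pt);	  /* page table */
	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);	  /* bo_va */
	amdgpu_vm_bo_base_init(&vm->root.base, vm, root); /* root PD */
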
/**
* amdgpu_vm_level_shift - return the addr shift for each level
*
@@ -412,11 +442,16 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
struct amdgpu_bo *pt;
if (!entry->base.bo) {
- r = amdgpu_bo_create(adev,
- amdgpu_vm_bo_size(adev, level),
- AMDGPU_GPU_PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, flags,
- ttm_bo_type_kernel, resv, &pt);
+ struct amdgpu_bo_param bp;
+
+ memset(&bp, 0, sizeof(bp));
+ bp.size = amdgpu_vm_bo_size(adev, level);
+ bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+ bp.flags = flags;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = resv;
+ r = amdgpu_bo_create(adev, &bp, &pt);
if (r)
return r;
@@ -441,11 +476,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
*/
pt->parent = amdgpu_bo_ref(parent->base.bo);
- entry->base.vm = vm;
- entry->base.bo = pt;
- list_add_tail(&entry->base.bo_list, &pt->va);
+ amdgpu_vm_bo_base_init(&entry->base, vm, pt);
spin_lock(&vm->status_lock);
- list_add(&entry->base.vm_status, &vm->relocated);
+ list_move(&entry->base.vm_status, &vm->relocated);
spin_unlock(&vm->status_lock);
}
@@ -628,7 +661,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
if (vm_flush_needed || pasid_mapping_needed) {
- r = amdgpu_fence_emit(ring, &fence);
+ r = amdgpu_fence_emit(ring, &fence, 0);
if (r)
return r;
}
@@ -1557,6 +1590,15 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
spin_lock(&vm->status_lock);
list_del_init(&bo_va->base.vm_status);
+
+ /* If the BO is not in its preferred location, add it back to
+ * the evicted list so that it gets validated again on the
+ * next command submission.
+ */
+ if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
+ !(bo->preferred_domains &
+ amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)))
+ list_add_tail(&bo_va->base.vm_status, &vm->evicted);
spin_unlock(&vm->status_lock);
list_splice_init(&bo_va->invalids, &bo_va->valids);
@@ -1827,36 +1869,12 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
if (bo_va == NULL) {
return NULL;
}
- bo_va->base.vm = vm;
- bo_va->base.bo = bo;
- INIT_LIST_HEAD(&bo_va->base.bo_list);
- INIT_LIST_HEAD(&bo_va->base.vm_status);
+ amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);
- if (!bo)
- return bo_va;
-
- list_add_tail(&bo_va->base.bo_list, &bo->va);
-
- if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
- return bo_va;
-
- if (bo->preferred_domains &
- amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
- return bo_va;
-
- /*
- * We checked all the prerequisites, but it looks like this per VM BO
- * is currently evicted. add the BO to the evicted list to make sure it
- * is validated on next VM use to avoid fault.
- * */
- spin_lock(&vm->status_lock);
- list_move_tail(&bo_va->base.vm_status, &vm->evicted);
- spin_unlock(&vm->status_lock);
-
return bo_va;
}
@@ -2234,6 +2252,10 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
{
struct amdgpu_vm_bo_base *bo_base;
+ /* the shadow BO doesn't have a BO base; its validation needs its parent */
+ if (bo->parent && bo->parent->shadow == bo)
+ bo = bo->parent;
+
list_for_each_entry(bo_base, &bo->va, bo_list) {
struct amdgpu_vm *vm = bo_base->vm;
@@ -2355,6 +2377,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int vm_context, unsigned int pasid)
{
+ struct amdgpu_bo_param bp;
+ struct amdgpu_bo *root;
const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
AMDGPU_VM_PTE_COUNT(adev) * 8);
unsigned ring_instance;
@@ -2380,7 +2404,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
ring = adev->vm_manager.vm_pte_rings[ring_instance];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
r = drm_sched_entity_init(&ring->sched, &vm->entity,
- rq, amdgpu_sched_jobs, NULL);
+ rq, NULL);
if (r)
return r;
@@ -2409,24 +2433,28 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
flags |= AMDGPU_GEM_CREATE_SHADOW;
size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
- r = amdgpu_bo_create(adev, size, align, AMDGPU_GEM_DOMAIN_VRAM, flags,
- ttm_bo_type_kernel, NULL, &vm->root.base.bo);
+ memset(&bp, 0, sizeof(bp));
+ bp.size = size;
+ bp.byte_align = align;
+ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+ bp.flags = flags;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = NULL;
+ r = amdgpu_bo_create(adev, &bp, &root);
if (r)
goto error_free_sched_entity;
- r = amdgpu_bo_reserve(vm->root.base.bo, true);
+ r = amdgpu_bo_reserve(root, true);
if (r)
goto error_free_root;
- r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
+ r = amdgpu_vm_clear_bo(adev, vm, root,
adev->vm_manager.root_level,
vm->pte_support_ats);
if (r)
goto error_unreserve;
- vm->root.base.vm = vm;
- list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
- list_add_tail(&vm->root.base.vm_status, &vm->evicted);
+ amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
amdgpu_bo_unreserve(vm->root.base.bo);
if (pasid) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 30f0803..4cf6786 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -75,11 +75,12 @@ struct amdgpu_bo_list_entry;
/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a) ((uint64_t)a << 59)
-/* VEGA10 only */
+
+/* For GFX9 */
#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
#define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL)
-/* For Raven */
+#define AMDGPU_MTYPE_NC 0
#define AMDGPU_MTYPE_CC 2
#define AMDGPU_PTE_DEFAULT_ATC (AMDGPU_PTE_SYSTEM \
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 47ef3e6..a266dcf 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -5903,7 +5903,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
pi->pcie_dpm_key_disabled = 0;
pi->thermal_sclk_dpm_enabled = 0;
- if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
+ if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
pi->caps_sclk_ds = true;
else
pi->caps_sclk_ds = false;
@@ -6255,7 +6255,7 @@ static int ci_dpm_late_init(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!amdgpu_dpm)
+ if (!adev->pm.dpm_enabled)
return 0;
/* init the sysfs and debugfs files late */
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 0df2203..8ff4c60 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1735,6 +1735,12 @@ static void cik_invalidate_hdp(struct amdgpu_device *adev,
}
}
+static bool cik_need_full_reset(struct amdgpu_device *adev)
+{
+ /* change this when we support soft reset */
+ return true;
+}
+
static const struct amdgpu_asic_funcs cik_asic_funcs =
{
.read_disabled_bios = &cik_read_disabled_bios,
@@ -1748,6 +1754,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.get_config_memsize = &cik_get_config_memsize,
.flush_hdp = &cik_flush_hdp,
.invalidate_hdp = &cik_invalidate_hdp,
+ .need_full_reset = &cik_need_full_reset,
};
static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 452f88e..ada241b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1823,7 +1823,6 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_framebuffer *amdgpu_fb;
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
@@ -1842,18 +1841,15 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
return 0;
}
- if (atomic) {
- amdgpu_fb = to_amdgpu_framebuffer(fb);
+ if (atomic)
target_fb = fb;
- } else {
- amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+ else
target_fb = crtc->primary->fb;
- }
/* If atomic, assume fb object is pinned & idle & fenced and
* just update base pointers
*/
- obj = amdgpu_fb->obj;
+ obj = target_fb->obj[0];
abo = gem_to_amdgpu_bo(obj);
r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
@@ -2043,8 +2039,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
- amdgpu_fb = to_amdgpu_framebuffer(fb);
- abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ abo = gem_to_amdgpu_bo(fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
@@ -2526,11 +2521,9 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
int r;
- struct amdgpu_framebuffer *amdgpu_fb;
struct amdgpu_bo *abo;
- amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
- abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index a7c1c58..a5b96ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -173,6 +173,7 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
ARRAY_SIZE(polaris11_golden_settings_a11));
break;
case CHIP_POLARIS10:
+ case CHIP_VEGAM:
amdgpu_device_program_register_sequence(adev,
polaris10_golden_settings_a11,
ARRAY_SIZE(polaris10_golden_settings_a11));
@@ -473,6 +474,7 @@ static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
num_crtc = 2;
break;
case CHIP_POLARIS10:
+ case CHIP_VEGAM:
num_crtc = 6;
break;
case CHIP_POLARIS11:
@@ -1445,6 +1447,7 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
adev->mode_info.audio.num_pins = 7;
break;
case CHIP_POLARIS10:
+ case CHIP_VEGAM:
adev->mode_info.audio.num_pins = 8;
break;
case CHIP_POLARIS11:
@@ -1862,7 +1865,6 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_framebuffer *amdgpu_fb;
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
@@ -1881,18 +1883,15 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
return 0;
}
- if (atomic) {
- amdgpu_fb = to_amdgpu_framebuffer(fb);
+ if (atomic)
target_fb = fb;
- } else {
- amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+ else
target_fb = crtc->primary->fb;
- }
/* If atomic, assume fb object is pinned & idle & fenced and
* just update base pointers
*/
- obj = amdgpu_fb->obj;
+ obj = target_fb->obj[0];
abo = gem_to_amdgpu_bo(obj);
r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
@@ -2082,8 +2081,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
- amdgpu_fb = to_amdgpu_framebuffer(fb);
- abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ abo = gem_to_amdgpu_bo(fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
@@ -2253,7 +2251,8 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
if ((adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12)) {
+ (adev->asic_type == CHIP_POLARIS12) ||
+ (adev->asic_type == CHIP_VEGAM)) {
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -2601,11 +2600,9 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
int r;
- struct amdgpu_framebuffer *amdgpu_fb;
struct amdgpu_bo *abo;
- amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
- abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
@@ -2673,7 +2670,8 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
if ((adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12)) {
+ (adev->asic_type == CHIP_POLARIS12) ||
+ (adev->asic_type == CHIP_VEGAM)) {
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
int encoder_mode =
@@ -2830,6 +2828,7 @@ static int dce_v11_0_early_init(void *handle)
adev->mode_info.num_dig = 9;
break;
case CHIP_POLARIS10:
+ case CHIP_VEGAM:
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
@@ -2949,7 +2948,8 @@ static int dce_v11_0_hw_init(void *handle)
amdgpu_atombios_encoder_init_dig(adev);
if ((adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12)) {
+ (adev->asic_type == CHIP_POLARIS12) ||
+ (adev->asic_type == CHIP_VEGAM)) {
amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
amdgpu_atombios_crtc_set_dce_clock(adev, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 9f67b7f..394cc1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1780,7 +1780,6 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_framebuffer *amdgpu_fb;
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
@@ -1798,18 +1797,15 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
return 0;
}
- if (atomic) {
- amdgpu_fb = to_amdgpu_framebuffer(fb);
+ if (atomic)
target_fb = fb;
- } else {
- amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+ else
target_fb = crtc->primary->fb;
- }
/* If atomic, assume fb object is pinned & idle & fenced and
* just update base pointers
*/
- obj = amdgpu_fb->obj;
+ obj = target_fb->obj[0];
abo = gem_to_amdgpu_bo(obj);
r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
@@ -1978,8 +1974,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
- amdgpu_fb = to_amdgpu_framebuffer(fb);
- abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ abo = gem_to_amdgpu_bo(fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
@@ -2414,11 +2409,9 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
int r;
- struct amdgpu_framebuffer *amdgpu_fb;
struct amdgpu_bo *abo;
- amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
- abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index f55422c..c9b9ab8 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1754,7 +1754,6 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_framebuffer *amdgpu_fb;
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
@@ -1773,18 +1772,15 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
return 0;
}
- if (atomic) {
- amdgpu_fb = to_amdgpu_framebuffer(fb);
+ if (atomic)
target_fb = fb;
- } else {
- amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+ else
target_fb = crtc->primary->fb;
- }
/* If atomic, assume fb object is pinned & idle & fenced and
* just update base pointers
*/
- obj = amdgpu_fb->obj;
+ obj = target_fb->obj[0];
abo = gem_to_amdgpu_bo(obj);
r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
@@ -1955,8 +1951,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
- amdgpu_fb = to_amdgpu_framebuffer(fb);
- abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ abo = gem_to_amdgpu_bo(fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
@@ -2430,11 +2425,9 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
int r;
- struct amdgpu_framebuffer *amdgpu_fb;
struct amdgpu_bo *abo;
- amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
- abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index b51f05d..de7be3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -168,11 +168,9 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
int r;
- struct amdgpu_framebuffer *amdgpu_fb;
struct amdgpu_bo *abo;
- amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
- abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
@@ -329,7 +327,7 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
return 0;
}
-static int dce_virtual_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status dce_virtual_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
return MODE_OK;
@@ -462,8 +460,9 @@ static int dce_virtual_hw_init(void *handle)
break;
case CHIP_CARRIZO:
case CHIP_STONEY:
- case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_VEGAM:
dce_v11_0_disable_dce(adev);
break;
case CHIP_TOPAZ:
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
new file mode 100644
index 0000000..4ffda99
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "df_v1_7.h"
+
+#include "df/df_1_7_default.h"
+#include "df/df_1_7_offset.h"
+#include "df/df_1_7_sh_mask.h"
+
+static u32 df_v1_7_channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
+
+static void df_v1_7_init(struct amdgpu_device *adev)
+{
+}
+
+static void df_v1_7_enable_broadcast_mode(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 tmp;
+
+ if (enable) {
+ tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl);
+ tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
+ WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp);
+ } else {
+ WREG32_SOC15(DF, 0, mmFabricConfigAccessControl,
+ mmFabricConfigAccessControl_DEFAULT);
+ }
+}
+
+static u32 df_v1_7_get_fb_channel_number(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
+ tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
+ tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
+
+ return tmp;
+}
+
+static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev)
+{
+ int fb_channel_number;
+
+ fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
+
+ return df_v1_7_channel_number[fb_channel_number];
+}
+
+static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 tmp;
+
+ /* Put the DF in broadcast mode */
+ adev->df_funcs->enable_broadcast_mode(adev, true);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
+ tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
+ tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
+ tmp |= DF_V1_7_MGCG_ENABLE_15_CYCLE_DELAY;
+ WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
+ } else {
+ tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
+ tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
+ tmp |= DF_V1_7_MGCG_DISABLE;
+ WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
+ }
+
+ /* Exit broadcast mode */
+ adev->df_funcs->enable_broadcast_mode(adev, false);
+}
+
+static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags)
+{
+ u32 tmp;
+
+ /* AMD_CG_SUPPORT_DF_MGCG */
+ tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
+ if (tmp & DF_V1_7_MGCG_ENABLE_15_CYCLE_DELAY)
+ *flags |= AMD_CG_SUPPORT_DF_MGCG;
+}
+
+const struct amdgpu_df_funcs df_v1_7_funcs = {
+ .init = df_v1_7_init,
+ .enable_broadcast_mode = df_v1_7_enable_broadcast_mode,
+ .get_fb_channel_number = df_v1_7_get_fb_channel_number,
+ .get_hbm_channel_number = df_v1_7_get_hbm_channel_number,
+ .update_medium_grain_clock_gating = df_v1_7_update_medium_grain_clock_gating,
+ .get_clockgating_state = df_v1_7_get_clockgating_state,
+};
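
IntLvNumChan is an encoded interleave setting rather than a raw count, hence the lookup table at the top of the file: index 5 maps to 8 channels, index 7 to 16, and so on. A short sketch of the resulting HBM query:

	u32 field, channels;

	field = adev->df_funcs->get_fb_channel_number(adev);	/* e.g. 5 */
	channels = df_v1_7_channel_number[field];		/* -> 8 */
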
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h b/drivers/gpu/drm/amd/amdgpu/df_v1_7.h
index 214f370..7462110 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,33 +20,21 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#ifndef PP_SOC15_H
-#define PP_SOC15_H
-#include "soc15_hw_ip.h"
-#include "vega10_ip_offset.h"
+#ifndef __DF_V1_7_H__
+#define __DF_V1_7_H__
-inline static uint32_t soc15_get_register_offset(
- uint32_t hw_id,
- uint32_t inst,
- uint32_t segment,
- uint32_t offset)
+#include "soc15_common.h"
+enum DF_V1_7_MGCG
{
- uint32_t reg = 0;
+ DF_V1_7_MGCG_DISABLE = 0,
+ DF_V1_7_MGCG_ENABLE_00_CYCLE_DELAY = 1,
+ DF_V1_7_MGCG_ENABLE_01_CYCLE_DELAY = 2,
+ DF_V1_7_MGCG_ENABLE_15_CYCLE_DELAY = 13,
+ DF_V1_7_MGCG_ENABLE_31_CYCLE_DELAY = 14,
+ DF_V1_7_MGCG_ENABLE_63_CYCLE_DELAY = 15
+};
- if (hw_id == THM_HWID)
- reg = THM_BASE.instance[inst].segment[segment] + offset;
- else if (hw_id == NBIF_HWID)
- reg = NBIF_BASE.instance[inst].segment[segment] + offset;
- else if (hw_id == MP1_HWID)
- reg = MP1_BASE.instance[inst].segment[segment] + offset;
- else if (hw_id == DF_HWID)
- reg = DF_BASE.instance[inst].segment[segment] + offset;
- else if (hw_id == GC_HWID)
- reg = GC_BASE.instance[inst].segment[segment] + offset;
- else if (hw_id == SMUIO_HWID)
- reg = SMUIO_BASE.instance[inst].segment[segment] + offset;
- return reg;
-}
+extern const struct amdgpu_df_funcs df_v1_7_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index e14263f..818874b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -125,18 +125,6 @@ MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
-MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");
-
MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
@@ -149,6 +137,18 @@ MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");
+
MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
@@ -161,6 +161,13 @@ MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
+MODULE_FIRMWARE("amdgpu/vegam_ce.bin");
+MODULE_FIRMWARE("amdgpu/vegam_pfp.bin");
+MODULE_FIRMWARE("amdgpu/vegam_me.bin");
+MODULE_FIRMWARE("amdgpu/vegam_mec.bin");
+MODULE_FIRMWARE("amdgpu/vegam_mec2.bin");
+MODULE_FIRMWARE("amdgpu/vegam_rlc.bin");
+
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
@@ -292,6 +299,37 @@ static const u32 tonga_mgcg_cgcg_init[] =
mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};
+static const u32 golden_settings_vegam_a11[] =
+{
+ mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
+ mmCB_HW_CONTROL_2, 0x0f000000, 0x0d000000,
+ mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
+ mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+ mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
+ mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x3a00161a,
+ mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002e,
+ mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
+ mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
+ mmSQ_CONFIG, 0x07f80000, 0x01180000,
+ mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
+ mmTCC_CTRL, 0x00100000, 0xf31fff7f,
+ mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
+ mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
+ mmTCP_CHAN_STEER_LO, 0xffffffff, 0x32761054,
+ mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
+};
+
+static const u32 vegam_golden_common_all[] =
+{
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+ mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
+ mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
+ mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
+};
+
static const u32 golden_settings_polaris11_a11[] =
{
mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
@@ -712,6 +750,14 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
tonga_golden_common_all,
ARRAY_SIZE(tonga_golden_common_all));
break;
+ case CHIP_VEGAM:
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_vegam_a11,
+ ARRAY_SIZE(golden_settings_vegam_a11));
+ amdgpu_device_program_register_sequence(adev,
+ vegam_golden_common_all,
+ ARRAY_SIZE(vegam_golden_common_all));
+ break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
amdgpu_device_program_register_sequence(adev,
@@ -918,17 +964,20 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
case CHIP_FIJI:
chip_name = "fiji";
break;
- case CHIP_POLARIS11:
- chip_name = "polaris11";
+ case CHIP_STONEY:
+ chip_name = "stoney";
break;
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
+ case CHIP_POLARIS11:
+ chip_name = "polaris11";
+ break;
case CHIP_POLARIS12:
chip_name = "polaris12";
break;
- case CHIP_STONEY:
- chip_name = "stoney";
+ case CHIP_VEGAM:
+ chip_name = "vegam";
break;
default:
BUG();
@@ -1770,6 +1819,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_POLARIS10:
+ case CHIP_VEGAM:
ret = amdgpu_atombios_get_gfx_info(adev);
if (ret)
return ret;
@@ -1957,12 +2007,13 @@ static int gfx_v8_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (adev->asic_type) {
- case CHIP_FIJI:
case CHIP_TONGA:
+ case CHIP_CARRIZO:
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
- case CHIP_POLARIS10:
- case CHIP_CARRIZO:
+ case CHIP_VEGAM:
adev->gfx.mec.num_mec = 2;
break;
case CHIP_TOPAZ:
@@ -2323,6 +2374,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
break;
case CHIP_FIJI:
+ case CHIP_VEGAM:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
@@ -3504,6 +3556,7 @@ gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
{
switch (adev->asic_type) {
case CHIP_FIJI:
+ case CHIP_VEGAM:
*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
RB_XSEL2(1) | PKR_MAP(2) |
PKR_XSEL(1) | PKR_YSEL(1) |
@@ -4071,7 +4124,8 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
gfx_v8_0_init_power_gating(adev);
WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
} else if ((adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12)) {
+ (adev->asic_type == CHIP_POLARIS12) ||
+ (adev->asic_type == CHIP_VEGAM)) {
gfx_v8_0_init_csb(adev);
gfx_v8_0_init_save_restore_list(adev);
gfx_v8_0_enable_save_restore_machine(adev);
@@ -4146,7 +4200,8 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
if (adev->asic_type == CHIP_POLARIS11 ||
adev->asic_type == CHIP_POLARIS10 ||
- adev->asic_type == CHIP_POLARIS12) {
+ adev->asic_type == CHIP_POLARIS12 ||
+ adev->asic_type == CHIP_VEGAM) {
tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
tmp &= ~0x3;
WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
@@ -5498,7 +5553,8 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
bool enable)
{
if ((adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12))
+ (adev->asic_type == CHIP_POLARIS12) ||
+ (adev->asic_type == CHIP_VEGAM))
/* Send msg to SMU via Powerplay */
amdgpu_device_ip_set_powergating_state(adev,
AMD_IP_BLOCK_TYPE_SMC,
@@ -5588,6 +5644,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
+ case CHIP_VEGAM:
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
else
@@ -6154,6 +6211,7 @@ static int gfx_v8_0_set_clockgating_state(void *handle,
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
+ case CHIP_VEGAM:
gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index e5962e6..fc19118 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -41,7 +41,6 @@
#define GFX9_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
-#define GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH 34
#define mmPWR_MISC_CNTL_STATUS 0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0
@@ -185,6 +184,30 @@ static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000)
};
+static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
+{
+ mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+ mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+ mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+ mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+ mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+ mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+ mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+ mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
+};
+
+static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
+{
+ mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+ mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+ mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+ mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+ mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+ mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+ mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+ mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
+};
+
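
The two tables express each RLC_SRM_INDEX_CNTL_ADDR/DATA_n register as an offset from its _0 sibling, so later code can program all eight pairs in one loop. A hedged sketch, assuming the WREG32_SOC15_OFFSET helper and hypothetical srm_addr[]/srm_data[] value arrays:

	int i;

	for (i = 0; i < ARRAY_SIZE(GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS); i++) {
		WREG32_SOC15_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0,
				    GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
				    srm_addr[i]);	/* hypothetical input */
		WREG32_SOC15_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0,
				    GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
				    srm_data[i]);	/* hypothetical input */
	}
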
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@@ -401,6 +424,27 @@ static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
kfree(adev->gfx.rlc.register_list_format);
}
+static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_1 *rlc_hdr;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
+ adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
+ adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
+ adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
+ adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
+ adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
+ adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
+ adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
+ adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
+ adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
+ adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
+ adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
+ adev->gfx.rlc.reg_list_format_direct_reg_list_length =
+ le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
+}
+
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
@@ -412,6 +456,8 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
const struct rlc_firmware_header_v2_0 *rlc_hdr;
unsigned int *tmp = NULL;
unsigned int i = 0;
+ uint16_t version_major;
+ uint16_t version_minor;
DRM_DEBUG("\n");
@@ -468,6 +514,12 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
goto out;
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+
+ version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
+ version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
+ if (version_major == 2 && version_minor == 1)
+ adev->gfx.rlc.is_rlc_v2_1 = true;
+
adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
adev->gfx.rlc.save_and_restore_offset =
@@ -508,6 +560,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
+ if (adev->gfx.rlc.is_rlc_v2_1)
+ gfx_v9_0_init_rlc_ext_microcode(adev);
+
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
if (err)
@@ -566,6 +621,26 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ if (adev->gfx.rlc.is_rlc_v2_1) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
+
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
+
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
+ }
+
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
info->fw = adev->gfx.mec_fw;
@@ -1600,6 +1675,7 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
gfx_v9_0_setup_rb(adev);
gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
+ adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
@@ -1616,7 +1692,10 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
- tmp = adev->gmc.shared_aperture_start >> 48;
+ tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
+ (adev->gmc.private_aperture_start >> 48));
+ tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
+ (adev->gmc.shared_aperture_start >> 48));
WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
}
}
@@ -1708,55 +1787,42 @@ static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
adev->gfx.rlc.clear_state_size);
}
-static void gfx_v9_0_parse_ind_reg_list(int *register_list_format,
+static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
int indirect_offset,
int list_size,
int *unique_indirect_regs,
int *unique_indirect_reg_count,
- int max_indirect_reg_count,
int *indirect_start_offsets,
- int *indirect_start_offsets_count,
- int max_indirect_start_offsets_count)
+ int *indirect_start_offsets_count)
{
int idx;
- bool new_entry = true;
for (; indirect_offset < list_size; indirect_offset++) {
+ indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
+ *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
- if (new_entry) {
- new_entry = false;
- indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
- *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
- BUG_ON(*indirect_start_offsets_count >= max_indirect_start_offsets_count);
- }
+ while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
+ indirect_offset += 2;
- if (register_list_format[indirect_offset] == 0xFFFFFFFF) {
- new_entry = true;
- continue;
- }
+ /* look for the matching index */
+ for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
+ if (unique_indirect_regs[idx] ==
+ register_list_format[indirect_offset] ||
+ !unique_indirect_regs[idx])
+ break;
+ }
- indirect_offset += 2;
+ BUG_ON(idx >= *unique_indirect_reg_count);
- /* look for the matching indice */
- for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
- if (unique_indirect_regs[idx] ==
- register_list_format[indirect_offset])
- break;
- }
+ if (!unique_indirect_regs[idx])
+ unique_indirect_regs[idx] = register_list_format[indirect_offset];
- if (idx >= *unique_indirect_reg_count) {
- unique_indirect_regs[*unique_indirect_reg_count] =
- register_list_format[indirect_offset];
- idx = *unique_indirect_reg_count;
- *unique_indirect_reg_count = *unique_indirect_reg_count + 1;
- BUG_ON(*unique_indirect_reg_count >= max_indirect_reg_count);
+ indirect_offset++;
}
-
- register_list_format[indirect_offset] = idx;
}
}
-static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
+static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
{
int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
int unique_indirect_reg_count = 0;
@@ -1765,7 +1831,7 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
int indirect_start_offsets_count = 0;
int list_size = 0;
- int i = 0;
+ int i = 0, j = 0;
u32 tmp = 0;
u32 *register_list_format =
@@ -1776,15 +1842,14 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
adev->gfx.rlc.reg_list_format_size_bytes);
/* setup unique_indirect_regs array and indirect_start_offsets array */
- gfx_v9_0_parse_ind_reg_list(register_list_format,
- GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH,
- adev->gfx.rlc.reg_list_format_size_bytes >> 2,
- unique_indirect_regs,
- &unique_indirect_reg_count,
- ARRAY_SIZE(unique_indirect_regs),
- indirect_start_offsets,
- &indirect_start_offsets_count,
- ARRAY_SIZE(indirect_start_offsets));
+ unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
+ gfx_v9_1_parse_ind_reg_list(register_list_format,
+ adev->gfx.rlc.reg_list_format_direct_reg_list_length,
+ adev->gfx.rlc.reg_list_format_size_bytes >> 2,
+ unique_indirect_regs,
+ &unique_indirect_reg_count,
+ indirect_start_offsets,
+ &indirect_start_offsets_count);
/* enable auto inc in case it is disabled */
tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
@@ -1798,19 +1863,37 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
adev->gfx.rlc.register_restore[i]);
- /* load direct register */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR), 0);
- for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
- adev->gfx.rlc.register_restore[i]);
-
/* load indirect register */
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
adev->gfx.rlc.reg_list_format_start);
- for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
+
+ /* direct register portion */
+ for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
register_list_format[i]);
+ /* indirect register portion */
+ while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
+ if (register_list_format[i] == 0xFFFFFFFF) {
+ WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
+ continue;
+ }
+
+ WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
+ WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
+
+ for (j = 0; j < unique_indirect_reg_count; j++) {
+ if (register_list_format[i] == unique_indirect_regs[j]) {
+ WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
+ break;
+ }
+ }
+
+ BUG_ON(j >= unique_indirect_reg_count);
+
+ i++;
+ }
+
/* set save/restore list size */
list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
list_size = list_size >> 1;
@@ -1823,14 +1906,19 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
adev->gfx.rlc.starting_offsets_start);
for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
- indirect_start_offsets[i]);
+ indirect_start_offsets[i]);
/* load unique indirect regs*/
for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i,
- unique_indirect_regs[i] & 0x3FFFF);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i,
- unique_indirect_regs[i] >> 20);
+ if (unique_indirect_regs[i] != 0) {
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
+ + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
+ unique_indirect_regs[i] & 0x3FFFF);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
+ + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
+ unique_indirect_regs[i] >> 20);
+ }
}
kfree(register_list_format);
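/* Inferred stream layout (an assumption read off the two loops above, not
 * stated in the patch): a direct prefix of
 * reg_list_format_direct_reg_list_length dwords, then three-dword indirect
 * records whose last word names an indirect register, each block closed by
 * a 0xFFFFFFFF sentinel that is itself uploaded. During upload the register
 * word is replaced by its index into the RLC_SRM_INDEX_CNTL_ADDR/DATA pair
 * programmed at the end. All values below are hypothetical. */
static const u32 example_format[] = {
	0x2440, 0x2444,			/* direct portion (length 2 here) */
	0x0001, 0x00AA, 0x2500,		/* record: two dwords + indirect reg */
	0x0002, 0x00BB, 0x2504,
	0xFFFFFFFF,			/* end of the indirect block */
};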
@@ -2010,6 +2098,9 @@ static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *ad
static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
{
+ if (!adev->gfx.rlc.is_rlc_v2_1)
+ return;
+
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_DMG |
@@ -2017,27 +2108,12 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
AMD_PG_SUPPORT_GDS |
AMD_PG_SUPPORT_RLC_SMU_HS)) {
gfx_v9_0_init_csb(adev);
- gfx_v9_0_init_rlc_save_restore_list(adev);
+ gfx_v9_1_init_rlc_save_restore_list(adev);
gfx_v9_0_enable_save_restore_machine(adev);
- if (adev->asic_type == CHIP_RAVEN) {
- WREG32(mmRLC_JUMP_TABLE_RESTORE,
- adev->gfx.rlc.cp_table_gpu_addr >> 8);
- gfx_v9_0_init_gfx_power_gating(adev);
-
- if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
- gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
- gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
- } else {
- gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
- gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
- }
-
- if (adev->pg_flags & AMD_PG_SUPPORT_CP)
- gfx_v9_0_enable_cp_power_gating(adev, true);
- else
- gfx_v9_0_enable_cp_power_gating(adev, false);
- }
+ WREG32(mmRLC_JUMP_TABLE_RESTORE,
+ adev->gfx.rlc.cp_table_gpu_addr >> 8);
+ gfx_v9_0_init_gfx_power_gating(adev);
}
}
@@ -3061,6 +3137,9 @@ static int gfx_v9_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_UNGATE);
+
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -3279,6 +3358,11 @@ static int gfx_v9_0_late_init(void *handle)
if (r)
return r;
+ r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_GATE);
+ if (r)
+ return r;
+
return 0;
}
@@ -3339,8 +3423,7 @@ static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- /* TODO: double check if we need to perform under safe mdoe */
- /* gfx_v9_0_enter_rlc_safe_mode(adev); */
+ gfx_v9_0_enter_rlc_safe_mode(adev);
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
@@ -3351,7 +3434,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
}
- /* gfx_v9_0_exit_rlc_safe_mode(adev); */
+ gfx_v9_0_exit_rlc_safe_mode(adev);
}
static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
@@ -3742,7 +3825,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
}
amdgpu_ring_write(ring, header);
-BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
+ BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
@@ -3774,13 +3857,16 @@ static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
{
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+ bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
/* RELEASE_MEM - flush caches, send int */
amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
- amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
- EOP_TC_ACTION_EN |
- EOP_TC_WB_ACTION_EN |
- EOP_TC_MD_ACTION_EN |
+ amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
+ EOP_TC_NC_ACTION_EN) :
+ (EOP_TCL1_ACTION_EN |
+ EOP_TC_ACTION_EN |
+ EOP_TC_WB_ACTION_EN |
+ EOP_TC_MD_ACTION_EN)) |
EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
EVENT_INDEX(5)));
amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
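/* Usage sketch (hypothetical call site; the flag itself comes from this
 * series): a fence that only needs its data written back through the
 * texture cache, without the full L1/L2 flush-and-invalidate. */
amdgpu_ring_emit_fence(ring, gpu_addr, seq,
		       AMDGPU_FENCE_FLAG_64BIT | AMDGPU_FENCE_FLAG_TC_WB_ONLY);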
@@ -4137,6 +4223,20 @@ static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}
+static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask)
+{
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+
+ if (amdgpu_sriov_vf(ring->adev))
+ gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
+ ref, mask, 0x20);
+ else
+ amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
+ ref, mask);
+}
+
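/* Rough shape of the bare-metal fallback (a sketch of what
 * amdgpu_ring_emit_reg_write_reg_wait_helper does, inferred from its use
 * here): write the reference value to reg0, then poll reg1 under the mask.
 * The fused wait_reg_mem path above matters for SR-IOV, where keeping the
 * write and the poll in one packet prevents a world switch from landing
 * between them. */
static void reg_write_reg_wait_sketch(struct amdgpu_ring *ring,
				      uint32_t reg0, uint32_t reg1,
				      uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	amdgpu_ring_emit_reg_wait(ring, reg1, ref, mask);
}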
static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
@@ -4458,6 +4558,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_tmz = gfx_v9_0_ring_emit_tmz,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
@@ -4492,6 +4593,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.set_priority = gfx_v9_0_ring_set_priority_compute,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
@@ -4522,6 +4624,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
.emit_rreg = gfx_v9_0_ring_emit_rreg,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 5617cf6..79f9ac2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -819,12 +819,33 @@ static int gmc_v6_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_bo_late_init(adev);
+
if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
else
return 0;
}
+static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
+{
+ u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
+ unsigned size;
+
+ if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+ size = 9 * 1024 * 1024; /* reserve 8 MB for vga emulator and 1 MB for FB */
+ } else {
+ u32 viewport = RREG32(mmVIEWPORT_SIZE);
+ size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
+ REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
+ 4);
+ }
+ /* return 0 if the pre-OS buffer uses up most of vram */
+ if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
+ return 0;
+ return size;
+}
+
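/* Worked example: a 1920x1080 pre-OS surface at 32 bpp needs
 * 1920 * 1080 * 4 = 8294400 bytes (~7.9 MB), so a viewport-derived
 * reservation stays just under the 9 MB VGA-mode fallback above. */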
static int gmc_v6_0_sw_init(void *handle)
{
int r;
@@ -851,8 +872,6 @@ static int gmc_v6_0_sw_init(void *handle)
adev->gmc.mc_mask = 0xffffffffffULL;
- adev->gmc.stolen_size = 256 * 1024;
-
adev->need_dma32 = false;
dma_bits = adev->need_dma32 ? 32 : 40;
r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
@@ -878,6 +897,8 @@ static int gmc_v6_0_sw_init(void *handle)
if (r)
return r;
+ adev->gmc.stolen_size = gmc_v6_0_get_vbios_fb_size(adev);
+
r = amdgpu_bo_init(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 80054f3..7147bfe 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -958,12 +958,33 @@ static int gmc_v7_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_bo_late_init(adev);
+
if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
else
return 0;
}
+static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
+{
+ u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
+ unsigned size;
+
+ if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+ size = 9 * 1024 * 1024; /* reserve 8 MB for vga emulator and 1 MB for FB */
+ } else {
+ u32 viewport = RREG32(mmVIEWPORT_SIZE);
+ size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
+ REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
+ 4);
+ }
+ /* return 0 if the pre-OS buffer uses up most of vram */
+ if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
+ return 0;
+ return size;
+}
+
static int gmc_v7_0_sw_init(void *handle)
{
int r;
@@ -998,8 +1019,6 @@ static int gmc_v7_0_sw_init(void *handle)
*/
adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
- adev->gmc.stolen_size = 256 * 1024;
-
/* set DMA mask + need_dma32 flags.
* PCIE - can handle 40-bits.
* IGP - can handle 40-bits
@@ -1030,6 +1049,8 @@ static int gmc_v7_0_sw_init(void *handle)
if (r)
return r;
+ adev->gmc.stolen_size = gmc_v7_0_get_vbios_fb_size(adev);
+
/* Memory manager */
r = amdgpu_bo_init(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index d71d4cb..1edbe6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -138,6 +138,7 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
+ case CHIP_VEGAM:
amdgpu_device_program_register_sequence(adev,
golden_settings_polaris11_a11,
ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -231,6 +232,7 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
case CHIP_FIJI:
case CHIP_CARRIZO:
case CHIP_STONEY:
+ case CHIP_VEGAM:
return 0;
default: BUG();
}
@@ -567,9 +569,10 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
/* set the gart size */
if (amdgpu_gart_size == -1) {
switch (adev->asic_type) {
- case CHIP_POLARIS11: /* all engines support GPUVM */
case CHIP_POLARIS10: /* all engines support GPUVM */
+ case CHIP_POLARIS11: /* all engines support GPUVM */
case CHIP_POLARIS12: /* all engines support GPUVM */
+ case CHIP_VEGAM: /* all engines support GPUVM */
default:
adev->gmc.gart_size = 256ULL << 20;
break;
@@ -1049,12 +1052,33 @@ static int gmc_v8_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_bo_late_init(adev);
+
if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
else
return 0;
}
+static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
+{
+ u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
+ unsigned size;
+
+ if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+ size = 9 * 1024 * 1024; /* reserve 8 MB for vga emulator and 1 MB for FB */
+ } else {
+ u32 viewport = RREG32(mmVIEWPORT_SIZE);
+ size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
+ REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
+ 4);
+ }
+ /* return 0 if the pre-OS buffer uses up most of vram */
+ if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
+ return 0;
+ return size;
+}
+
#define mmMC_SEQ_MISC0_FIJI 0xA71
static int gmc_v8_0_sw_init(void *handle)
@@ -1068,7 +1092,8 @@ static int gmc_v8_0_sw_init(void *handle)
} else {
u32 tmp;
- if (adev->asic_type == CHIP_FIJI)
+ if ((adev->asic_type == CHIP_FIJI) ||
+ (adev->asic_type == CHIP_VEGAM))
tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
else
tmp = RREG32(mmMC_SEQ_MISC0);
@@ -1096,8 +1121,6 @@ static int gmc_v8_0_sw_init(void *handle)
*/
adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
- adev->gmc.stolen_size = 256 * 1024;
-
/* set DMA mask + need_dma32 flags.
* PCIE - can handle 40-bits.
* IGP - can handle 40-bits
@@ -1128,6 +1151,8 @@ static int gmc_v8_0_sw_init(void *handle)
if (r)
return r;
+ adev->gmc.stolen_size = gmc_v8_0_get_vbios_fb_size(adev);
+
/* Memory manager */
r = amdgpu_bo_init(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index e687363..6cccf0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -43,19 +43,13 @@
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
-#define mmDF_CS_AON0_DramBaseAddress0 0x0044
-#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
-//DF_CS_AON0_DramBaseAddress0
-#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
-#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
-#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
-#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
-#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
-#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
-#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
-#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
-#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
-#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
+/* add these here since we already include dce12 headers and these are for DCN */
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS 8
@@ -385,11 +379,9 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
upper_32_bits(pd_addr));
- amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);
-
- /* wait for the invalidate to complete */
- amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
- 1 << vmid, 1 << vmid);
+ amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
+ hub->vm_inv_eng0_ack + eng,
+ req, 1 << vmid);
return pd_addr;
}
@@ -556,8 +548,7 @@ static int gmc_v9_0_early_init(void *handle)
adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
adev->gmc.shared_aperture_end =
adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
- adev->gmc.private_aperture_start =
- adev->gmc.shared_aperture_end + 1;
+ adev->gmc.private_aperture_start = 0x1000000000000000ULL;
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;
@@ -659,6 +650,11 @@ static int gmc_v9_0_late_init(void *handle)
unsigned i;
int r;
+ /*
+ * TODO - Uncomment once GART corruption issue is fixed.
+ */
+ /* amdgpu_bo_late_init(adev); */
+
for(i = 0; i < adev->num_rings; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
unsigned vmhub = ring->funcs->vmhub;
@@ -714,7 +710,6 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
- u32 tmp;
int chansize, numchan;
int r;
@@ -727,39 +722,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
else
chansize = 128;
- tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
- tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
- tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
- switch (tmp) {
- case 0:
- default:
- numchan = 1;
- break;
- case 1:
- numchan = 2;
- break;
- case 2:
- numchan = 0;
- break;
- case 3:
- numchan = 4;
- break;
- case 4:
- numchan = 0;
- break;
- case 5:
- numchan = 8;
- break;
- case 6:
- numchan = 0;
- break;
- case 7:
- numchan = 16;
- break;
- case 8:
- numchan = 2;
- break;
- }
+ numchan = adev->df_funcs->get_hbm_channel_number(adev);
adev->gmc.vram_width = numchan * chansize;
}
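/* Probable shape of the new DF callback (a sketch; the real implementation
 * lives in df_v1_7.c, added elsewhere in this series): the same
 * DramBaseAddress0 decode as the removed switch, collapsed into a lookup
 * table. */
static const u32 df_v1_7_channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};

static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);

	tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
	tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
	return df_v1_7_channel_number[tmp];
}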
@@ -826,6 +789,52 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
return amdgpu_gart_table_vram_alloc(adev);
}
+static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
+{
+#if 0
+ u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
+#endif
+ unsigned size;
+
+ /*
+ * TODO Remove once GART corruption is resolved
+ * Check related code in gmc_v9_0_sw_fini
+ */
+ size = 9 * 1024 * 1024;
+
+#if 0
+ if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+ size = 9 * 1024 * 1024; /* reserve 8 MB for vga emulator and 1 MB for FB */
+ } else {
+ u32 viewport;
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
+ size = (REG_GET_FIELD(viewport,
+ HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
+ REG_GET_FIELD(viewport,
+ HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
+ 4);
+ break;
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ default:
+ viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
+ size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
+ REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
+ 4);
+ break;
+ }
+ }
+ /* return 0 if the pre-OS buffer uses up most of vram */
+ if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
+ return 0;
+
+#endif
+ return size;
+}
+
static int gmc_v9_0_sw_init(void *handle)
{
int r;
@@ -877,12 +886,6 @@ static int gmc_v9_0_sw_init(void *handle)
*/
adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
- /*
- * It needs to reserve 8M stolen memory for vega10
- * TODO: Figure out how to avoid that...
- */
- adev->gmc.stolen_size = 8 * 1024 * 1024;
-
/* set DMA mask + need_dma32 flags.
* PCIE - can handle 44-bits.
* IGP - can handle 44-bits
@@ -907,6 +910,8 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
+ adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);
+
/* Memory manager */
r = amdgpu_bo_init(adev);
if (r)
@@ -950,6 +955,18 @@ static int gmc_v9_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
gmc_v9_0_gart_fini(adev);
+
+ /*
+ * TODO:
+ * Currently there is a bug where some memory client outside
+ * of the driver writes to the first 8M of VRAM on S3 resume,
+ * this overwrites GART which by default gets placed in the first 8M and
+ * causes VM_FAULTS once GTT is accessed.
+ * Keep the stolen memory reservation in place until this is solved.
+ * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
+ */
+ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
+
amdgpu_bo_fini(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 26ba984..17f7f07 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2817,7 +2817,7 @@ static int kv_dpm_init(struct amdgpu_device *adev)
pi->caps_tcp_ramping = true;
}
- if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
+ if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
pi->caps_sclk_ds = true;
else
pi->caps_sclk_ds = false;
@@ -2974,7 +2974,7 @@ static int kv_dpm_late_init(void *handle)
/* powerdown unused blocks for now */
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!amdgpu_dpm)
+ if (!adev->pm.dpm_enabled)
return 0;
kv_dpm_powergate_acp(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 4933486..078f70f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -260,8 +260,10 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
} while (timeout > 1);
flr_done:
- if (locked)
+ if (locked) {
+ adev->in_gpu_reset = 0;
mutex_unlock(&adev->lock_reset);
+ }
/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_lockup_timeout == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 8da6da9..0cf48d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -40,11 +40,20 @@ enum psp_gfx_crtl_cmd_id
GFX_CTRL_CMD_ID_INIT_GPCOM_RING = 0x00020000, /* initialize GPCOM ring */
GFX_CTRL_CMD_ID_DESTROY_RINGS = 0x00030000, /* destroy rings */
GFX_CTRL_CMD_ID_CAN_INIT_RINGS = 0x00040000, /* is it allowed to initialize the rings */
+ GFX_CTRL_CMD_ID_ENABLE_INT = 0x00050000, /* enable PSP-to-Gfx interrupt */
+ GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */
+ GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */
GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */
};
+/*-----------------------------------------------------------------------------
+ NOTE: All physical addresses used in this interface are actually
+ GPU Virtual Addresses.
+*/
+
+
/* Control registers of the TEE Gfx interface. These are located in
* SRBM-to-PSP mailbox registers (total 8 registers).
*/
@@ -55,8 +64,8 @@ struct psp_gfx_ctrl
volatile uint32_t rbi_rptr; /* +8 Read pointer (index) of RBI ring */
volatile uint32_t gpcom_wptr; /* +12 Write pointer (index) of GPCOM ring */
volatile uint32_t gpcom_rptr; /* +16 Read pointer (index) of GPCOM ring */
- volatile uint32_t ring_addr_lo; /* +20 bits [31:0] of physical address of ring buffer */
- volatile uint32_t ring_addr_hi; /* +24 bits [63:32] of physical address of ring buffer */
+ volatile uint32_t ring_addr_lo; /* +20 bits [31:0] of GPU Virtual address of ring buffer (VMID=0) */
+ volatile uint32_t ring_addr_hi; /* +24 bits [63:32] of GPU Virtual address of ring buffer (VMID=0) */
volatile uint32_t ring_buf_size; /* +28 Ring buffer size (in bytes) */
};
@@ -78,6 +87,8 @@ enum psp_gfx_cmd_id
GFX_CMD_ID_LOAD_ASD = 0x00000004, /* load ASD Driver */
GFX_CMD_ID_SETUP_TMR = 0x00000005, /* setup TMR region */
GFX_CMD_ID_LOAD_IP_FW = 0x00000006, /* load HW IP FW */
+ GFX_CMD_ID_DESTROY_TMR = 0x00000007, /* destroy TMR region */
+ GFX_CMD_ID_SAVE_RESTORE = 0x00000008, /* save/restore HW IP FW */
};
@@ -85,11 +96,11 @@ enum psp_gfx_cmd_id
/* Command to load Trusted Application binary into PSP OS. */
struct psp_gfx_cmd_load_ta
{
- uint32_t app_phy_addr_lo; /* bits [31:0] of the physical address of the TA binary (must be 4 KB aligned) */
- uint32_t app_phy_addr_hi; /* bits [63:32] of the physical address of the TA binary */
+ uint32_t app_phy_addr_lo; /* bits [31:0] of the GPU Virtual address of the TA binary (must be 4 KB aligned) */
+ uint32_t app_phy_addr_hi; /* bits [63:32] of the GPU Virtual address of the TA binary */
uint32_t app_len; /* length of the TA binary in bytes */
- uint32_t cmd_buf_phy_addr_lo; /* bits [31:0] of the physical address of CMD buffer (must be 4 KB aligned) */
- uint32_t cmd_buf_phy_addr_hi; /* bits [63:32] of the physical address of CMD buffer */
+ uint32_t cmd_buf_phy_addr_lo; /* bits [31:0] of the GPU Virtual address of CMD buffer (must be 4 KB aligned) */
+ uint32_t cmd_buf_phy_addr_hi; /* bits [63:32] of the GPU Virtual address of CMD buffer */
uint32_t cmd_buf_len; /* length of the CMD buffer in bytes; must be multiple of 4 KB */
/* Note: CmdBufLen can be set to 0. In this case no persistent CMD buffer is provided
@@ -111,8 +122,8 @@ struct psp_gfx_cmd_unload_ta
*/
struct psp_gfx_buf_desc
{
- uint32_t buf_phy_addr_lo; /* bits [31:0] of physical address of the buffer (must be 4 KB aligned) */
- uint32_t buf_phy_addr_hi; /* bits [63:32] of physical address of the buffer */
+ uint32_t buf_phy_addr_lo; /* bits [31:0] of GPU Virtual address of the buffer (must be 4 KB aligned) */
+ uint32_t buf_phy_addr_hi; /* bits [63:32] of GPU Virtual address of the buffer */
uint32_t buf_size; /* buffer size in bytes (must be multiple of 4 KB and no bigger than 64 MB) */
};
@@ -145,8 +156,8 @@ struct psp_gfx_cmd_invoke_cmd
/* Command to setup TMR region. */
struct psp_gfx_cmd_setup_tmr
{
- uint32_t buf_phy_addr_lo; /* bits [31:0] of physical address of TMR buffer (must be 4 KB aligned) */
- uint32_t buf_phy_addr_hi; /* bits [63:32] of physical address of TMR buffer */
+ uint32_t buf_phy_addr_lo; /* bits [31:0] of GPU Virtual address of TMR buffer (must be 4 KB aligned) */
+ uint32_t buf_phy_addr_hi; /* bits [63:32] of GPU Virtual address of TMR buffer */
uint32_t buf_size; /* buffer size in bytes (must be multiple of 4 KB) */
};
@@ -174,18 +185,32 @@ enum psp_gfx_fw_type
GFX_FW_TYPE_ISP = 16,
GFX_FW_TYPE_ACP = 17,
GFX_FW_TYPE_SMU = 18,
+ GFX_FW_TYPE_MMSCH = 19,
+ GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM = 20,
+ GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM = 21,
+ GFX_FW_TYPE_RLC_RESTORE_LIST_CNTL = 22,
+ GFX_FW_TYPE_MAX = 23
};
/* Command to load HW IP FW. */
struct psp_gfx_cmd_load_ip_fw
{
- uint32_t fw_phy_addr_lo; /* bits [31:0] of physical address of FW location (must be 4 KB aligned) */
- uint32_t fw_phy_addr_hi; /* bits [63:32] of physical address of FW location */
+ uint32_t fw_phy_addr_lo; /* bits [31:0] of GPU Virtual address of FW location (must be 4 KB aligned) */
+ uint32_t fw_phy_addr_hi; /* bits [63:32] of GPU Virtual address of FW location */
uint32_t fw_size; /* FW buffer size in bytes */
enum psp_gfx_fw_type fw_type; /* FW type */
};
+/* Command to save/restore HW IP FW. */
+struct psp_gfx_cmd_save_restore_ip_fw
+{
+ uint32_t save_fw; /* if set, command is used for saving fw, otherwise for restoring */
+ uint32_t save_restore_addr_lo; /* bits [31:0] of FB address of GART memory used as save/restore buffer (must be 4 KB aligned) */
+ uint32_t save_restore_addr_hi; /* bits [63:32] of FB address of GART memory used as save/restore buffer */
+ uint32_t buf_size; /* Size of the save/restore buffer in bytes */
+ enum psp_gfx_fw_type fw_type; /* FW type */
+};
/* All GFX ring buffer commands. */
union psp_gfx_commands
@@ -195,7 +220,7 @@ union psp_gfx_commands
struct psp_gfx_cmd_invoke_cmd cmd_invoke_cmd;
struct psp_gfx_cmd_setup_tmr cmd_setup_tmr;
struct psp_gfx_cmd_load_ip_fw cmd_load_ip_fw;
-
+ struct psp_gfx_cmd_save_restore_ip_fw cmd_save_restore_ip_fw;
};
@@ -226,8 +251,8 @@ struct psp_gfx_cmd_resp
/* These fields are used for RBI only. They are all 0 in GPCOM commands
*/
- uint32_t resp_buf_addr_lo; /* +12 bits [31:0] of physical address of response buffer (must be 4 KB aligned) */
- uint32_t resp_buf_addr_hi; /* +16 bits [63:32] of physical address of response buffer */
+ uint32_t resp_buf_addr_lo; /* +12 bits [31:0] of GPU Virtual address of response buffer (must be 4 KB aligned) */
+ uint32_t resp_buf_addr_hi; /* +16 bits [63:32] of GPU Virtual address of response buffer */
uint32_t resp_offset; /* +20 offset within response buffer */
uint32_t resp_buf_size; /* +24 total size of the response buffer in bytes */
@@ -251,19 +276,19 @@ struct psp_gfx_cmd_resp
/* Structure of the Ring Buffer Frame */
struct psp_gfx_rb_frame
{
- uint32_t cmd_buf_addr_lo; /* +0 bits [31:0] of physical address of command buffer (must be 4 KB aligned) */
- uint32_t cmd_buf_addr_hi; /* +4 bits [63:32] of physical address of command buffer */
+ uint32_t cmd_buf_addr_lo; /* +0 bits [31:0] of GPU Virtual address of command buffer (must be 4 KB aligned) */
+ uint32_t cmd_buf_addr_hi; /* +4 bits [63:32] of GPU Virtual address of command buffer */
uint32_t cmd_buf_size; /* +8 command buffer size in bytes */
- uint32_t fence_addr_lo; /* +12 bits [31:0] of physical address of Fence for this frame */
- uint32_t fence_addr_hi; /* +16 bits [63:32] of physical address of Fence for this frame */
+ uint32_t fence_addr_lo; /* +12 bits [31:0] of GPU Virtual address of Fence for this frame */
+ uint32_t fence_addr_hi; /* +16 bits [63:32] of GPU Virtual address of Fence for this frame */
uint32_t fence_value; /* +20 Fence value */
uint32_t sid_lo; /* +24 bits [31:0] of SID value (used only for RBI frames) */
uint32_t sid_hi; /* +28 bits [63:32] of SID value (used only for RBI frames) */
uint8_t vmid; /* +32 VMID value used for mapping of all addresses for this frame */
uint8_t frame_type; /* +33 1: destroy context frame, 0: all other frames; used only for RBI frames */
uint8_t reserved1[2]; /* +34 reserved, must be 0 */
- uint32_t reserved2[7]; /* +40 reserved, must be 0 */
- /* total 64 bytes */
+ uint32_t reserved2[7]; /* +36 reserved, must be 0 */
+ /* total 64 bytes */
};
#endif /* _PSP_TEE_GFX_IF_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 8873d83..0ff136d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -70,6 +70,15 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
case AMDGPU_UCODE_ID_RLC_G:
*type = GFX_FW_TYPE_RLC_G;
break;
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
+ *type = GFX_FW_TYPE_RLC_RESTORE_LIST_CNTL;
+ break;
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
+ *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
+ break;
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
+ *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
+ break;
case AMDGPU_UCODE_ID_SMC:
*type = GFX_FW_TYPE_SMU;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index be20a38..aa9ab29 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -62,6 +62,8 @@ MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/vegam_sdma.bin");
+MODULE_FIRMWARE("amdgpu/vegam_sdma1.bin");
static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
@@ -209,6 +211,7 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
+ case CHIP_VEGAM:
amdgpu_device_program_register_sequence(adev,
golden_settings_polaris11_a11,
ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -275,15 +278,18 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
case CHIP_FIJI:
chip_name = "fiji";
break;
- case CHIP_POLARIS11:
- chip_name = "polaris11";
- break;
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
+ case CHIP_POLARIS11:
+ chip_name = "polaris11";
+ break;
case CHIP_POLARIS12:
chip_name = "polaris12";
break;
+ case CHIP_VEGAM:
+ chip_name = "vegam";
+ break;
case CHIP_CARRIZO:
chip_name = "carrizo";
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 399f876..03a36cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -360,6 +360,31 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
}
+static void sdma_v4_0_wait_reg_mem(struct amdgpu_ring *ring,
+ int mem_space, int hdp,
+ uint32_t addr0, uint32_t addr1,
+ uint32_t ref, uint32_t mask,
+ uint32_t inv)
+{
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) |
+ SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) |
+ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+ if (mem_space) {
+ /* memory */
+ amdgpu_ring_write(ring, addr0);
+ amdgpu_ring_write(ring, addr1);
+ } else {
+ /* registers */
+ amdgpu_ring_write(ring, addr0 << 2);
+ amdgpu_ring_write(ring, addr1 << 2);
+ }
+ amdgpu_ring_write(ring, ref); /* reference */
+ amdgpu_ring_write(ring, mask); /* mask */
+ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */
+}
+
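/* Encoding note (inferred from the helper above): with mem_space=0 the two
 * operands are dword register offsets and are shifted left by 2 to form
 * byte addresses; with mem_space=1 they are the low/high halves of a
 * 64-bit memory address, as in the pipeline-sync caller below. */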
/**
* sdma_v4_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
*
@@ -378,15 +403,10 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
else
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
- SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
- SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
- amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
- amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
- amdgpu_ring_write(ring, ref_and_mask); /* reference */
- amdgpu_ring_write(ring, ref_and_mask); /* mask */
- amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
- SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+ sdma_v4_0_wait_reg_mem(ring, 0, 1,
+ adev->nbio_funcs->get_hdp_flush_done_offset(adev),
+ adev->nbio_funcs->get_hdp_flush_req_offset(adev),
+ ref_and_mask, ref_and_mask, 10);
}
/**
@@ -1114,16 +1134,10 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
uint64_t addr = ring->fence_drv.gpu_addr;
/* wait for idle */
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
- SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
- SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
- SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
- amdgpu_ring_write(ring, addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
- amdgpu_ring_write(ring, seq); /* reference */
- amdgpu_ring_write(ring, 0xffffffff); /* mask */
- amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
- SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
+ sdma_v4_0_wait_reg_mem(ring, 1, 0,
+ addr & 0xfffffffc,
+ upper_32_bits(addr) & 0xffffffff,
+ seq, 0xffffffff, 4);
}
@@ -1154,15 +1168,7 @@ static void sdma_v4_0_ring_emit_wreg(struct amdgpu_ring *ring,
static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask)
{
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
- SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
- SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
- amdgpu_ring_write(ring, reg << 2);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, val); /* reference */
- amdgpu_ring_write(ring, mask); /* mask */
- amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
- SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
+ sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
}
static int sdma_v4_0_early_init(void *handle)
@@ -1605,6 +1611,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
.pad_ib = sdma_v4_0_ring_pad_ib,
.emit_wreg = sdma_v4_0_ring_emit_wreg,
.emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index a675ec6..c364ef9 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1252,6 +1252,12 @@ static void si_invalidate_hdp(struct amdgpu_device *adev,
}
}
+static bool si_need_full_reset(struct amdgpu_device *adev)
+{
+ /* change this when we support soft reset */
+ return true;
+}
+
static int si_get_pcie_lanes(struct amdgpu_device *adev)
{
u32 link_width_cntl;
@@ -1332,6 +1338,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
.get_config_memsize = &si_get_config_memsize,
.flush_hdp = &si_flush_hdp,
.invalidate_hdp = &si_invalidate_hdp,
+ .need_full_reset = &si_need_full_reset,
};
static uint32_t si_get_rev_id(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 797d505..b12d7c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -7580,7 +7580,7 @@ static int si_dpm_late_init(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!amdgpu_dpm)
+ if (!adev->pm.dpm_enabled)
return 0;
ret = si_set_temperature_range(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 51cf8a3..9006576 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -52,6 +52,7 @@
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
+#include "df_v1_7.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
@@ -60,33 +61,6 @@
#include "dce_virtual.h"
#include "mxgpu_ai.h"
-#define mmFabricConfigAccessControl 0x0410
-#define mmFabricConfigAccessControl_BASE_IDX 0
-#define mmFabricConfigAccessControl_DEFAULT 0x00000000
-//FabricConfigAccessControl
-#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT 0x0
-#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT 0x1
-#define FabricConfigAccessControl__CfgRegInstID__SHIFT 0x10
-#define FabricConfigAccessControl__CfgRegInstAccEn_MASK 0x00000001L
-#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK 0x00000002L
-#define FabricConfigAccessControl__CfgRegInstID_MASK 0x00FF0000L
-
-
-#define mmDF_PIE_AON0_DfGlobalClkGater 0x00fc
-#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX 0
-//DF_PIE_AON0_DfGlobalClkGater
-#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT 0x0
-#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK 0x0000000FL
-
-enum {
- DF_MGCG_DISABLE = 0,
- DF_MGCG_ENABLE_00_CYCLE_DELAY =1,
- DF_MGCG_ENABLE_01_CYCLE_DELAY =2,
- DF_MGCG_ENABLE_15_CYCLE_DELAY =13,
- DF_MGCG_ENABLE_31_CYCLE_DELAY =14,
- DF_MGCG_ENABLE_63_CYCLE_DELAY =15
-};
-
#define mmMP0_MISC_CGTT_CTRL0 0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
@@ -313,6 +287,7 @@ static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
+ { SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};
static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
@@ -341,6 +316,8 @@ static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
} else {
if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
return adev->gfx.config.gb_addr_config;
+ else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
+ return adev->gfx.config.db_debug2;
return RREG32(reg_offset);
}
}
@@ -521,6 +498,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
else
adev->nbio_funcs = &nbio_v6_1_funcs;
+ adev->df_funcs = &df_v1_7_funcs;
adev->nbio_funcs->detect_hw_virt(adev);
if (amdgpu_sriov_vf(adev))
@@ -593,6 +571,12 @@ static void soc15_invalidate_hdp(struct amdgpu_device *adev,
HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}
+static bool soc15_need_full_reset(struct amdgpu_device *adev)
+{
+ /* change this when we implement soft reset */
+ return true;
+}
+
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
.read_disabled_bios = &soc15_read_disabled_bios,
@@ -606,6 +590,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.get_config_memsize = &soc15_get_config_memsize,
.flush_hdp = &soc15_flush_hdp,
.invalidate_hdp = &soc15_invalidate_hdp,
+ .need_full_reset = &soc15_need_full_reset,
};
static int soc15_common_early_init(void *handle)
@@ -697,6 +682,11 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_LS;
adev->pg_flags = AMD_PG_SUPPORT_SDMA;
+ if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_RLC_SMU_HS;
+
adev->external_rev_id = 0x1;
break;
default:
@@ -871,32 +861,6 @@ static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *ade
WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}
-static void soc15_update_df_medium_grain_clock_gating(struct amdgpu_device *adev,
- bool enable)
-{
- uint32_t data;
-
- /* Put DF on broadcast mode */
- data = RREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl));
- data &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
- WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl), data);
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
- data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
- data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
- data |= DF_MGCG_ENABLE_15_CYCLE_DELAY;
- WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
- } else {
- data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
- data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
- data |= DF_MGCG_DISABLE;
- WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data);
- }
-
- WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl),
- mmFabricConfigAccessControl_DEFAULT);
-}
-
static int soc15_common_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -920,7 +884,7 @@ static int soc15_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE ? true : false);
soc15_update_rom_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- soc15_update_df_medium_grain_clock_gating(adev,
+ adev->df_funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
break;
case CHIP_RAVEN:
@@ -973,10 +937,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
*flags |= AMD_CG_SUPPORT_ROM_MGCG;
- /* AMD_CG_SUPPORT_DF_MGCG */
- data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater));
- if (data & DF_MGCG_ENABLE_15_CYCLE_DELAY)
- *flags |= AMD_CG_SUPPORT_DF_MGCG;
+ adev->df_funcs->get_clockgating_state(adev, flags);
}
static int soc15_common_set_powergating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index f22f7a8..8dc2910 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -159,6 +159,7 @@
#define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */
#define EOP_TCL1_ACTION_EN (1 << 16)
#define EOP_TC_ACTION_EN (1 << 17) /* L2 */
+#define EOP_TC_NC_ACTION_EN (1 << 19)
#define EOP_TC_MD_ACTION_EN (1 << 21) /* L2 metadata */
#define DATA_SEL(x) ((x) << 29)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 948bb943..87cbb14 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -688,7 +688,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
if (state == AMD_PG_STATE_GATE) {
uvd_v4_2_stop(adev);
- if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
+ if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
if (!(RREG32_SMC(ixCURRENT_PG_STATUS) &
CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) {
WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
@@ -699,7 +699,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
}
return 0;
} else {
- if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) {
+ if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) {
if (RREG32_SMC(ixCURRENT_PG_STATUS) &
CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index f26f515..ca6ab56 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -62,7 +62,7 @@ static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
{
return ((adev->asic_type >= CHIP_POLARIS10) &&
- (adev->asic_type <= CHIP_POLARIS12) &&
+ (adev->asic_type <= CHIP_VEGAM) &&
(!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
}
@@ -429,7 +429,7 @@ static int uvd_v6_0_sw_init(void *handle)
ring = &adev->uvd.ring_enc[0];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
- rq, amdgpu_sched_jobs, NULL);
+ rq, NULL);
if (r) {
DRM_ERROR("Failed setting up UVD ENC run queue.\n");
return r;
@@ -964,6 +964,16 @@ static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
}
/**
+ * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
+ *
+ * @ring: amdgpu_ring pointer
+ */
+static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ /* The firmware doesn't seem to like touching registers at this point. */
+}
+
+/**
* uvd_v6_0_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
@@ -1528,12 +1538,13 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.set_wptr = uvd_v6_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_frame_size =
- 6 + 6 + /* hdp flush / invalidate */
+ 6 + /* hdp invalidate */
10 + /* uvd_v6_0_ring_emit_pipeline_sync */
14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
+ .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
.test_ring = uvd_v6_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
@@ -1552,7 +1563,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
.emit_frame_size =
- 6 + 6 + /* hdp flush / invalidate */
+ 6 + /* hdp invalidate */
10 + /* uvd_v6_0_ring_emit_pipeline_sync */
VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
@@ -1561,6 +1572,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.emit_fence = uvd_v6_0_ring_emit_fence,
.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
+ .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
.test_ring = uvd_v6_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index eddc57f..0ca63d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -418,7 +418,7 @@ static int uvd_v7_0_sw_init(void *handle)
ring = &adev->uvd.ring_enc[0];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
- rq, amdgpu_sched_jobs, NULL);
+ rq, NULL);
if (r) {
DRM_ERROR("Failed setting up UVD ENC run queue.\n");
return r;
@@ -1136,6 +1136,16 @@ static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
}
/**
+ * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
+ *
+ * @ring: amdgpu_ring pointer
+ */
+static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ /* The firmware doesn't seem to like touching registers at this point. */
+}
+
+/**
* uvd_v7_0_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
@@ -1654,7 +1664,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
.get_wptr = uvd_v7_0_ring_get_wptr,
.set_wptr = uvd_v7_0_ring_set_wptr,
.emit_frame_size =
- 6 + 6 + /* hdp flush / invalidate */
+ 6 + /* hdp invalidate */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
8 + /* uvd_v7_0_ring_emit_vm_flush */
@@ -1663,6 +1673,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
.emit_ib = uvd_v7_0_ring_emit_ib,
.emit_fence = uvd_v7_0_ring_emit_fence,
.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
+ .emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
.test_ring = uvd_v7_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = uvd_v7_0_ring_insert_nop,
@@ -1671,6 +1682,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
.end_use = amdgpu_uvd_ring_end_use,
.emit_wreg = uvd_v7_0_ring_emit_wreg,
.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
@@ -1702,6 +1714,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
.end_use = amdgpu_uvd_ring_end_use,
.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 428d192..0999c84 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -388,7 +388,8 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
default:
if ((adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12))
+ (adev->asic_type == CHIP_POLARIS12) ||
+ (adev->asic_type == CHIP_VEGAM))
return AMDGPU_VCE_HARVEST_VCE1;
return 0;
@@ -467,8 +468,8 @@ static int vce_v3_0_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
vce_v3_0_override_vce_clock_gating(adev, true);
- if (!(adev->flags & AMD_IS_APU))
- amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
+
+ amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
for (i = 0; i < adev->vce.num_rings; i++)
adev->vce.ring[i].ready = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 73fd48d..8fd1b74 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -1081,6 +1081,7 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
.end_use = amdgpu_vce_ring_end_use,
.emit_wreg = vce_v4_0_emit_wreg,
.emit_reg_wait = vce_v4_0_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 8c13267..0501746b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1109,6 +1109,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.end_use = amdgpu_vcn_ring_end_use,
.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
@@ -1139,6 +1140,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
.end_use = amdgpu_vcn_ring_end_use,
.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 126f127..4ac1288 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -305,9 +305,10 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
stoney_mgcg_cgcg_init,
ARRAY_SIZE(stoney_mgcg_cgcg_init));
break;
- case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
case CHIP_POLARIS12:
+ case CHIP_VEGAM:
default:
break;
}
@@ -728,33 +729,59 @@ static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
return r;
tmp = RREG32_SMC(cntl_reg);
- tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
- CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
+
+ if (adev->flags & AMD_IS_APU)
+ tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
+ else
+ tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
+ CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
tmp |= dividers.post_divider;
WREG32_SMC(cntl_reg, tmp);
for (i = 0; i < 100; i++) {
- if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
- break;
+ tmp = RREG32_SMC(status_reg);
+ if (adev->flags & AMD_IS_APU) {
+ if (tmp & 0x10000)
+ break;
+ } else {
+ if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
+ break;
+ }
mdelay(10);
}
if (i == 100)
return -ETIMEDOUT;
-
return 0;
}
+#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
+#define ixGNB_CLK1_STATUS 0xD822010C
+#define ixGNB_CLK2_DFS_CNTL 0xD8220110
+#define ixGNB_CLK2_STATUS 0xD822012C
+#define ixGNB_CLK3_DFS_CNTL 0xD8220130
+#define ixGNB_CLK3_STATUS 0xD822014C
+
static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
int r;
- r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
- if (r)
- return r;
+ if (adev->flags & AMD_IS_APU) {
+ r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
+ if (r)
+ return r;
- r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
- if (r)
- return r;
+ r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
+ if (r)
+ return r;
+ } else {
+ r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
+ if (r)
+ return r;
+
+ r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
+ if (r)
+ return r;
+ }
return 0;
}
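The APU branch in vi_set_uvd_clock() polls a raw bit (0x10000) because the GNB_CLK*_STATUS registers do not share the CG_DCLK_STATUS field layout. The wait loop itself is the usual bounded-poll idiom; factored out as a sketch (the helper name is illustrative, not part of the patch):

	static int vi_wait_clock_latched(struct amdgpu_device *adev,
					 u32 status_reg, u32 status_mask)
	{
		int i;

		/* worst case ~1 s: 100 polls, 10 ms apart */
		for (i = 0; i < 100; i++) {
			if (RREG32_SMC(status_reg) & status_mask)
				return 0;
			mdelay(10);
		}
		return -ETIMEDOUT;
	}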
@@ -764,6 +791,22 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
int r, i;
struct atom_clock_dividers dividers;
u32 tmp;
+ u32 reg_ctrl;
+ u32 reg_status;
+ u32 status_mask;
+ u32 reg_mask;
+
+ if (adev->flags & AMD_IS_APU) {
+ reg_ctrl = ixGNB_CLK3_DFS_CNTL;
+ reg_status = ixGNB_CLK3_STATUS;
+ status_mask = 0x00010000;
+ reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
+ } else {
+ reg_ctrl = ixCG_ECLK_CNTL;
+ reg_status = ixCG_ECLK_STATUS;
+ status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
+ reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
+ }
r = amdgpu_atombios_get_clock_dividers(adev,
COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
@@ -772,24 +815,25 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
return r;
for (i = 0; i < 100; i++) {
- if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
+ if (RREG32_SMC(reg_status) & status_mask)
break;
mdelay(10);
}
+
if (i == 100)
return -ETIMEDOUT;
- tmp = RREG32_SMC(ixCG_ECLK_CNTL);
- tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
- CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
+ tmp = RREG32_SMC(reg_ctrl);
+ tmp &= ~reg_mask;
tmp |= dividers.post_divider;
- WREG32_SMC(ixCG_ECLK_CNTL, tmp);
+ WREG32_SMC(reg_ctrl, tmp);
for (i = 0; i < 100; i++) {
- if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
+ if (RREG32_SMC(reg_status) & status_mask)
break;
mdelay(10);
}
+
if (i == 100)
return -ETIMEDOUT;
@@ -876,6 +920,27 @@ static void vi_invalidate_hdp(struct amdgpu_device *adev,
}
}
+static bool vi_need_full_reset(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ /* CZ has hang issues with full reset at the moment */
+ return false;
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ /* XXX: soft reset should work on fiji and tonga */
+ return true;
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_TOPAZ:
+ default:
+ /* change this when we support soft reset */
+ return true;
+ }
+}
+
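vi_need_full_reset() is a query hook, not a reset implementation; callers are expected to reach it through the asic_funcs dispatch. A minimal consumer sketch, assuming the usual dispatch macro (mirroring .flush_hdp and friends) and with hypothetical reset entry points:

	#define amdgpu_asic_need_full_reset(adev) \
		((adev)->asic_funcs->need_full_reset((adev)))

	static int vi_try_recovery(struct amdgpu_device *adev)
	{
		if (amdgpu_asic_need_full_reset(adev))
			return do_full_asic_reset(adev);	/* hypothetical */
		return do_per_ip_soft_reset(adev);		/* hypothetical */
	}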
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
.read_disabled_bios = &vi_read_disabled_bios,
@@ -889,6 +954,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.get_config_memsize = &vi_get_config_memsize,
.flush_hdp = &vi_flush_hdp,
.invalidate_hdp = &vi_invalidate_hdp,
+ .need_full_reset = &vi_need_full_reset,
};
#define CZ_REV_BRISTOL(rev) \
@@ -1031,6 +1097,30 @@ static int vi_common_early_init(void *handle)
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x64;
break;
+ case CHIP_VEGAM:
+ adev->cg_flags = 0;
+ /*AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_3D_CGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGLS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_MGCG |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ROM_MGCG |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_DRM_LS |
+ AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_VCE_MGCG;*/
+ adev->pg_flags = 0;
+ adev->external_rev_id = adev->rev_id + 0x6E;
+ break;
case CHIP_CARRIZO:
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_GFX_MGCG |
@@ -1422,6 +1512,7 @@ static int vi_common_set_clockgating_state(void *handle,
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
+ case CHIP_VEGAM:
vi_common_set_clockgating_state_by_smu(adev, state);
default:
break;
@@ -1551,9 +1642,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
}
break;
- case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
case CHIP_POLARIS12:
+ case CHIP_VEGAM:
amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 5b124a6..e6ca72c 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -9,14 +9,6 @@ config DRM_AMD_DC
support for AMDGPU. This adds required support for Vega and
Raven ASICs.
-config DRM_AMD_DC_PRE_VEGA
- bool "DC support for Polaris and older ASICs"
- default y
- help
- Choose this option to enable the new DC support for older asics
- by default. This includes Polaris, Carrizo, Tonga, Bonaire,
- and Hawaii.
-
config DRM_AMD_DC_FBC
bool "AMD FBC - Enable Frame Buffer Compression"
depends on DRM_AMD_DC
@@ -42,4 +34,10 @@ config DEBUG_KERNEL_DC
if you want to hit
	  kgdb_break in assert.
+config DRM_AMD_DC_VEGAM
+ bool "VEGAM support"
+ depends on DRM_AMD_DC
+ help
+	  Choose this option if you want VEGAM
+	  support for the display engine.
endmenu
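DRM_AMD_DC_VEGAM is consumed purely as a compile-time guard around CHIP_VEGAM case labels in the display code later in this series; the recurring pattern is:

	switch (adev->asic_type) {
	case CHIP_POLARIS10:
	#if defined(CONFIG_DRM_AMD_DC_VEGAM)
	case CHIP_VEGAM:	/* shares the Polaris/DCE 11.x path */
	#endif
		/* DCE 11.x style setup */
		break;
	default:
		break;
	}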
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1dd1142..f2f54a9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -433,11 +433,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
- if (amdgpu_dc_log)
- init_data.log_mask = DC_DEFAULT_LOG_MASK;
- else
- init_data.log_mask = DC_MIN_LOG_MASK;
-
/*
* TODO debug why this doesn't work on Raven
*/
@@ -649,18 +644,6 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
static int dm_resume(void *handle)
{
struct amdgpu_device *adev = handle;
- struct amdgpu_display_manager *dm = &adev->dm;
- int ret = 0;
-
- /* power on hardware */
- dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
-
- ret = amdgpu_dm_display_resume(adev);
- return ret;
-}
-
-int amdgpu_dm_display_resume(struct amdgpu_device *adev)
-{
struct drm_device *ddev = adev->ddev;
struct amdgpu_display_manager *dm = &adev->dm;
struct amdgpu_dm_connector *aconnector;
@@ -671,10 +654,12 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
struct dm_plane_state *dm_new_plane_state;
-
- int ret = 0;
+ int ret;
int i;
+ /* power on hardware */
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
/* program HPD filter */
dc_resume(dm->dc);
@@ -688,8 +673,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
amdgpu_dm_irq_resume_early(adev);
	/* Do detection */
- list_for_each_entry(connector,
- &ddev->mode_config.connector_list, head) {
+ list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
aconnector = to_amdgpu_dm_connector(connector);
/*
@@ -711,7 +695,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
}
	/* Force mode set in atomic commit */
- for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
new_crtc_state->active_changed = true;
/*
@@ -719,7 +703,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
* them here, since they were duplicated as part of the suspend
* procedure.
*/
- for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) {
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (dm_new_crtc_state->stream) {
WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
@@ -728,7 +712,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
}
}
- for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) {
+ for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
dm_new_plane_state = to_dm_plane_state(new_plane_state);
if (dm_new_plane_state->dc_state) {
WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
@@ -737,9 +721,9 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev)
}
}
- ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
+ ret = drm_atomic_helper_resume(ddev, dm->cached_state);
- adev->dm.cached_state = NULL;
+ dm->cached_state = NULL;
amdgpu_dm_irq_resume_late(adev);
@@ -1529,6 +1513,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case CHIP_VEGAM:
+#endif
case CHIP_VEGA10:
case CHIP_VEGA12:
if (dce110_register_irq_handlers(dm->adev)) {
@@ -1549,7 +1536,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
break;
#endif
default:
- DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
+ DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
goto fail;
}
@@ -1657,7 +1644,6 @@ static ssize_t s3_debug_store(struct device *device,
if (ret == 0) {
if (s3_state) {
dm_resume(adev);
- amdgpu_dm_display_resume(adev);
drm_kms_helper_hotplug_event(adev->ddev);
} else
dm_suspend(adev);
@@ -1722,6 +1708,9 @@ static int dm_early_init(void *handle)
adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_POLARIS10:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case CHIP_VEGAM:
+#endif
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
@@ -1743,7 +1732,7 @@ static int dm_early_init(void *handle)
break;
#endif
default:
- DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
+ DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
return -EINVAL;
}
@@ -1848,7 +1837,7 @@ static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
uint64_t *tiling_flags)
{
- struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
int r = amdgpu_bo_reserve(rbo, false);
if (unlikely(r)) {
@@ -2017,7 +2006,6 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *amdgpu_fb =
to_amdgpu_framebuffer(plane_state->fb);
const struct drm_crtc *crtc = plane_state->crtc;
- struct dc_transfer_func *input_tf;
int ret = 0;
if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
@@ -2031,13 +2019,6 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
if (ret)
return ret;
- input_tf = dc_create_transfer_func();
-
- if (input_tf == NULL)
- return -ENOMEM;
-
- dc_plane_state->in_transfer_func = input_tf;
-
/*
* Always set input transfer function, since plane state is refreshed
* every time.
@@ -2206,7 +2187,6 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
const struct drm_connector *connector)
{
struct dc_crtc_timing *timing_out = &stream->timing;
- struct dc_transfer_func *tf = dc_create_transfer_func();
memset(timing_out, 0, sizeof(struct dc_crtc_timing));
@@ -2250,9 +2230,8 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
stream->output_color_space = get_output_color_space(timing_out);
- tf->type = TF_TYPE_PREDEFINED;
- tf->tf = TRANSFER_FUNCTION_SRGB;
- stream->out_transfer_func = tf;
+ stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
+ stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
}
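Context for the two transfer-function hunks above: the removed code called dc_create_transfer_func() on every pass and repointed the plane/stream at the fresh object, apparently dropping the previous allocation. The replacement mutates the object the stream already owns:

	/* Old (removed) pattern -- a new allocation per call:
	 *	tf = dc_create_transfer_func();
	 *	stream->out_transfer_func = tf;		prior object never released
	 * New pattern -- update in place:
	 */
	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;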
static void fill_audio_info(struct audio_info *audio_info,
@@ -2488,6 +2467,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
update_stream_signal(stream);
+ if (dm_state && dm_state->freesync_capable)
+ stream->ignore_msa_timing_param = true;
+
return stream;
}
@@ -2710,18 +2692,15 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
const struct dc_link *link = aconnector->dc_link;
struct amdgpu_device *adev = connector->dev->dev_private;
struct amdgpu_display_manager *dm = &adev->dm;
+
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
- link->type != dc_connection_none) {
- amdgpu_dm_register_backlight_device(dm);
-
- if (dm->backlight_dev) {
- backlight_device_unregister(dm->backlight_dev);
- dm->backlight_dev = NULL;
- }
-
+ link->type != dc_connection_none &&
+ dm->backlight_dev) {
+ backlight_device_unregister(dm->backlight_dev);
+ dm->backlight_dev = NULL;
}
#endif
drm_connector_unregister(connector);
@@ -2855,7 +2834,7 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
create_eml_sink(aconnector);
}
-int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int result = MODE_ERROR;
@@ -3058,8 +3037,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
}
afb = to_amdgpu_framebuffer(new_state->fb);
-
- obj = afb->obj;
+ obj = new_state->fb->obj[0];
rbo = gem_to_amdgpu_bo(obj);
adev = amdgpu_ttm_adev(rbo->tbo.bdev);
r = amdgpu_bo_reserve(rbo, false);
@@ -3067,12 +3045,11 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
return r;
if (plane->type != DRM_PLANE_TYPE_CURSOR)
- domain = amdgpu_display_framebuffer_domains(adev);
+ domain = amdgpu_display_supported_domains(adev);
else
domain = AMDGPU_GEM_DOMAIN_VRAM;
r = amdgpu_bo_pin(rbo, domain, &afb->address);
-
amdgpu_bo_unreserve(rbo);
if (unlikely(r != 0)) {
@@ -3123,14 +3100,12 @@ static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct amdgpu_bo *rbo;
- struct amdgpu_framebuffer *afb;
int r;
if (!old_state->fb)
return;
- afb = to_amdgpu_framebuffer(old_state->fb);
- rbo = gem_to_amdgpu_bo(afb->obj);
+ rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
r = amdgpu_bo_reserve(rbo, false);
if (unlikely(r)) {
DRM_ERROR("failed to reserve rbo before unpin\n");
@@ -3773,7 +3748,7 @@ static void remove_stream(struct amdgpu_device *adev,
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
struct dc_cursor_position *position)
{
- struct amdgpu_crtc *amdgpu_crtc = amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
int x, y;
int xorigin = 0, yorigin = 0;
@@ -3905,7 +3880,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
int r, vpos, hpos;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
- struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
struct amdgpu_device *adev = crtc->dev->dev_private;
bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
struct dc_flip_addrs addr = { {0} };
@@ -3986,6 +3961,96 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
+/*
+ * TODO this whole function needs to go
+ *
+ * dc_surface_update is needlessly complex. See if we can just replace this
+ * with a dc_plane_state and follow the atomic model a bit more closely here.
+ */
+static bool commit_planes_to_stream(
+ struct dc *dc,
+ struct dc_plane_state **plane_states,
+ uint8_t new_plane_count,
+ struct dm_crtc_state *dm_new_crtc_state,
+ struct dm_crtc_state *dm_old_crtc_state,
+ struct dc_state *state)
+{
+ /* no need to dynamically allocate this. it's pretty small */
+ struct dc_surface_update updates[MAX_SURFACES];
+ struct dc_flip_addrs *flip_addr;
+ struct dc_plane_info *plane_info;
+ struct dc_scaling_info *scaling_info;
+ int i;
+ struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
+ struct dc_stream_update *stream_update =
+ kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
+
+ if (!stream_update) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
+ GFP_KERNEL);
+ plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
+ GFP_KERNEL);
+ scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
+ GFP_KERNEL);
+
+ if (!flip_addr || !plane_info || !scaling_info) {
+ kfree(flip_addr);
+ kfree(plane_info);
+ kfree(scaling_info);
+ kfree(stream_update);
+ return false;
+ }
+
+ memset(updates, 0, sizeof(updates));
+
+ stream_update->src = dc_stream->src;
+ stream_update->dst = dc_stream->dst;
+ stream_update->out_transfer_func = dc_stream->out_transfer_func;
+
+ for (i = 0; i < new_plane_count; i++) {
+ updates[i].surface = plane_states[i];
+ updates[i].gamma =
+ (struct dc_gamma *)plane_states[i]->gamma_correction;
+ updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
+ flip_addr[i].address = plane_states[i]->address;
+ flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
+ plane_info[i].color_space = plane_states[i]->color_space;
+ plane_info[i].format = plane_states[i]->format;
+ plane_info[i].plane_size = plane_states[i]->plane_size;
+ plane_info[i].rotation = plane_states[i]->rotation;
+ plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
+ plane_info[i].stereo_format = plane_states[i]->stereo_format;
+ plane_info[i].tiling_info = plane_states[i]->tiling_info;
+ plane_info[i].visible = plane_states[i]->visible;
+ plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
+ plane_info[i].dcc = plane_states[i]->dcc;
+ scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
+ scaling_info[i].src_rect = plane_states[i]->src_rect;
+ scaling_info[i].dst_rect = plane_states[i]->dst_rect;
+ scaling_info[i].clip_rect = plane_states[i]->clip_rect;
+
+ updates[i].flip_addr = &flip_addr[i];
+ updates[i].plane_info = &plane_info[i];
+ updates[i].scaling_info = &scaling_info[i];
+ }
+
+ dc_commit_updates_for_stream(
+ dc,
+ updates,
+ new_plane_count,
+ dc_stream, stream_update, plane_states, state);
+
+ kfree(flip_addr);
+ kfree(plane_info);
+ kfree(scaling_info);
+ kfree(stream_update);
+ return true;
+}
+
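A note on commit_planes_to_stream()'s allocation split: updates[] stays on the stack, while the three larger per-plane arrays are kcalloc'ed, presumably to keep the kernel stack frame small at MAX_SURFACES entries each. The error path leans on kfree(NULL) being a no-op, so one unconditional cleanup handles any partial-allocation failure; condensed sketch:

	flip_addr    = kcalloc(MAX_SURFACES, sizeof(*flip_addr), GFP_KERNEL);
	plane_info   = kcalloc(MAX_SURFACES, sizeof(*plane_info), GFP_KERNEL);
	scaling_info = kcalloc(MAX_SURFACES, sizeof(*scaling_info), GFP_KERNEL);
	if (!flip_addr || !plane_info || !scaling_info) {
		kfree(flip_addr);	/* kfree(NULL) is a no-op */
		kfree(plane_info);
		kfree(scaling_info);
		kfree(stream_update);
		return false;
	}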
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_device *dev,
struct amdgpu_display_manager *dm,
@@ -4001,6 +4066,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_crtc_state *new_pcrtc_state =
drm_atomic_get_new_crtc_state(state, pcrtc);
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
+ struct dm_crtc_state *dm_old_crtc_state =
+ to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
int planes_count = 0;
unsigned long flags;
@@ -4037,7 +4104,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
}
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- if (!pflip_needed) {
+ if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) {
WARN_ON(!dm_new_plane_state->dc_state);
plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
@@ -4079,10 +4146,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
- if (false == dc_commit_planes_to_stream(dm->dc,
+
+ if (false == commit_planes_to_stream(dm->dc,
plane_states_constructed,
planes_count,
- dc_stream_attach,
+ acrtc_state,
+ dm_old_crtc_state,
dm_state->context))
dm_error("%s: Failed to attach plane!\n", __func__);
} else {
@@ -4307,8 +4376,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
struct dc_stream_status *status = NULL;
- if (acrtc)
+ if (acrtc) {
new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
/* Skip any modesets/resets */
if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
@@ -4331,11 +4402,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
WARN_ON(!status->plane_count);
	/* TODO: how does this work with MPO? */
- if (!dc_commit_planes_to_stream(
+ if (!commit_planes_to_stream(
dm->dc,
status->plane_states,
status->plane_count,
- dm_new_crtc_state->stream,
+ dm_new_crtc_state,
+ to_dm_crtc_state(old_crtc_state),
dm_state->context))
dm_error("%s: Failed to update stream scaling!\n", __func__);
}
@@ -4578,7 +4650,7 @@ static int dm_update_crtcs_state(struct dc *dc,
if (aconnector && enable) {
// Make sure fake sink is created in plug-in scenario
new_con_state = drm_atomic_get_connector_state(state,
- &aconnector->base);
+ &aconnector->base);
if (IS_ERR(new_con_state)) {
ret = PTR_ERR_OR_ZERO(new_con_state);
@@ -4755,7 +4827,8 @@ static int dm_update_planes_state(struct dc *dc,
/* Remove any changed/removed planes */
if (!enable) {
- if (pflip_needed)
+ if (pflip_needed &&
+ plane->type != DRM_PLANE_TYPE_OVERLAY)
continue;
if (!old_plane_crtc)
@@ -4802,7 +4875,8 @@ static int dm_update_planes_state(struct dc *dc,
if (!dm_new_crtc_state->stream)
continue;
- if (pflip_needed)
+ if (pflip_needed &&
+ plane->type != DRM_PLANE_TYPE_OVERLAY)
continue;
WARN_ON(dm_new_plane_state->dc_state);
@@ -5009,17 +5083,24 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
struct edid *edid)
{
int i;
- uint64_t val_capable;
bool edid_check_required;
struct detailed_timing *timing;
struct detailed_non_pixel *data;
struct detailed_data_monitor_range *range;
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
+ struct dm_connector_state *dm_con_state;
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
+ if (!connector->state) {
+		DRM_ERROR("%s - Connector has no state\n", __func__);
+ return;
+ }
+
+ dm_con_state = to_dm_connector_state(connector->state);
+
edid_check_required = false;
if (!amdgpu_dm_connector->dc_sink) {
DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
@@ -5038,7 +5119,7 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
amdgpu_dm_connector);
}
}
- val_capable = 0;
+ dm_con_state->freesync_capable = false;
if (edid_check_required == true && (edid->version > 1 ||
(edid->version == 1 && edid->revision > 1))) {
for (i = 0; i < 4; i++) {
@@ -5074,7 +5155,7 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
amdgpu_dm_connector->min_vfreq * 1000000;
amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
amdgpu_dm_connector->max_vfreq * 1000000;
- val_capable = 1;
+ dm_con_state->freesync_capable = true;
}
}
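The capability flag now lives in connector state instead of the old function-local val_capable, which is what lets create_stream_for_sink() (earlier hunk in this file) consume it. Producer and consumer side by side:

	/* Producer -- EDID range-descriptor parse (this hunk): */
	dm_con_state->freesync_capable = true;

	/* Consumer -- stream creation (earlier hunk): */
	if (dm_state && dm_state->freesync_capable)
		stream->ignore_msa_timing_param = true;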
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index b68400c..d5aa89a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -28,7 +28,6 @@
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
-#include "dc.h"
/*
* This file contains the definition for amdgpu_display_manager
@@ -53,6 +52,7 @@
struct amdgpu_device;
struct drm_device;
struct amdgpu_dm_irq_handler_data;
+struct dc;
struct amdgpu_dm_prev_state {
struct drm_framebuffer *fb;
@@ -220,6 +220,7 @@ struct dm_connector_state {
uint8_t underscan_hborder;
bool underscan_enable;
struct mod_freesync_user_enable user_enable;
+ bool freesync_capable;
};
#define to_dm_connector_state(x)\
@@ -246,7 +247,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
struct dc_link *link,
int link_index);
-int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode);
void dm_restore_drm_connector_state(struct drm_device *dev,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index 25f064c..e3d90e9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -25,6 +25,7 @@
#include "amdgpu_mode.h"
#include "amdgpu_dm.h"
+#include "dc.h"
#include "modules/color/color_gamma.h"
#define MAX_DRM_LUT_VALUE 0xFFFF
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index ace9ad5..4304d9e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -83,21 +83,22 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
I2C_MOT_TRUE : I2C_MOT_FALSE;
enum ddc_result res;
- ssize_t read_bytes;
+ uint32_t read_bytes = msg->size;
if (WARN_ON(msg->size > 16))
return -E2BIG;
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_READ:
- read_bytes = dal_ddc_service_read_dpcd_data(
+ res = dal_ddc_service_read_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
false,
I2C_MOT_UNDEF,
msg->address,
msg->buffer,
- msg->size);
- return read_bytes;
+ msg->size,
+ &read_bytes);
+ break;
case DP_AUX_NATIVE_WRITE:
res = dal_ddc_service_write_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
@@ -108,14 +109,15 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
msg->size);
break;
case DP_AUX_I2C_READ:
- read_bytes = dal_ddc_service_read_dpcd_data(
+ res = dal_ddc_service_read_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
true,
mot,
msg->address,
msg->buffer,
- msg->size);
- return read_bytes;
+ msg->size,
+ &read_bytes);
+ break;
case DP_AUX_I2C_WRITE:
res = dal_ddc_service_write_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
@@ -137,7 +139,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
r == DDC_RESULT_SUCESSFULL);
#endif
- return msg->size;
+ if (res != DDC_RESULT_SUCESSFULL)
+ return -EIO;
+ return read_bytes;
}
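The dm_dp_aux_transfer() rework changes the return contract: reads now report their byte count through an out-parameter and their status through the ddc_result, and any failed operation returns -EIO instead of unconditionally claiming msg->size. A hedged sketch of how a drm_dp_aux caller sees this:

	/* ret < 0: hard error (-EIO here); ret >= 0: bytes actually
	 * transferred, which may be shorter than msg->size for reads.
	 */
	ssize_t ret = aux->transfer(aux, &msg);
	if (ret < 0)
		return ret;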
static enum drm_connector_status
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 89342b4..0229c7ed 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -37,8 +37,17 @@
unsigned long long dm_get_timestamp(struct dc_context *ctx)
{
- /* TODO: return actual timestamp */
- return 0;
+ struct timespec64 time;
+
+ getrawmonotonic64(&time);
+ return timespec64_to_ns(&time);
+}
+
+unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
+ unsigned long long current_time_stamp,
+ unsigned long long last_time_stamp)
+{
+ return current_time_stamp - last_time_stamp;
}
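dm_get_timestamp() now returns a real timestamp: monotonic-raw, in nanoseconds, so NTP slewing cannot distort DC's perf traces; dm_get_elapse_time_in_ns() is then a plain subtraction in the same units. Usage sketch:

	unsigned long long start, end, elapsed_ns;

	start = dm_get_timestamp(ctx);
	/* ... timed work ... */
	end = dm_get_timestamp(ctx);
	elapsed_ns = dm_get_elapse_time_in_ns(ctx, end, start);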
void dm_perf_trace_timestamp(const char *func_name, unsigned int line)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 8a9bba8..7191c32 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -26,13 +26,13 @@
#include "dm_services.h"
#include "include/fixed31_32.h"
-static inline uint64_t abs_i64(
- int64_t arg)
+static inline unsigned long long abs_i64(
+ long long arg)
{
if (arg > 0)
- return (uint64_t)arg;
+ return (unsigned long long)arg;
else
- return (uint64_t)(-arg);
+ return (unsigned long long)(-arg);
}
/*
@@ -40,12 +40,12 @@ static inline uint64_t abs_i64(
* result = dividend / divisor
* *remainder = dividend % divisor
*/
-static inline uint64_t complete_integer_division_u64(
- uint64_t dividend,
- uint64_t divisor,
- uint64_t *remainder)
+static inline unsigned long long complete_integer_division_u64(
+ unsigned long long dividend,
+ unsigned long long divisor,
+ unsigned long long *remainder)
{
- uint64_t result;
+ unsigned long long result;
ASSERT(divisor);
@@ -65,29 +65,29 @@ static inline uint64_t complete_integer_division_u64(
(FRACTIONAL_PART_MASK & (x))
struct fixed31_32 dal_fixed31_32_from_fraction(
- int64_t numerator,
- int64_t denominator)
+ long long numerator,
+ long long denominator)
{
struct fixed31_32 res;
bool arg1_negative = numerator < 0;
bool arg2_negative = denominator < 0;
- uint64_t arg1_value = arg1_negative ? -numerator : numerator;
- uint64_t arg2_value = arg2_negative ? -denominator : denominator;
+ unsigned long long arg1_value = arg1_negative ? -numerator : numerator;
+ unsigned long long arg2_value = arg2_negative ? -denominator : denominator;
- uint64_t remainder;
+ unsigned long long remainder;
/* determine integer part */
- uint64_t res_value = complete_integer_division_u64(
+ unsigned long long res_value = complete_integer_division_u64(
arg1_value, arg2_value, &remainder);
ASSERT(res_value <= LONG_MAX);
/* determine fractional part */
{
- uint32_t i = FIXED31_32_BITS_PER_FRACTIONAL_PART;
+ unsigned int i = FIXED31_32_BITS_PER_FRACTIONAL_PART;
do {
remainder <<= 1;
@@ -103,14 +103,14 @@ struct fixed31_32 dal_fixed31_32_from_fraction(
/* round up LSB */
{
- uint64_t summand = (remainder << 1) >= arg2_value;
+ unsigned long long summand = (remainder << 1) >= arg2_value;
ASSERT(res_value <= LLONG_MAX - summand);
res_value += summand;
}
- res.value = (int64_t)res_value;
+ res.value = (long long)res_value;
if (arg1_negative ^ arg2_negative)
res.value = -res.value;
@@ -119,7 +119,7 @@ struct fixed31_32 dal_fixed31_32_from_fraction(
}
struct fixed31_32 dal_fixed31_32_from_int_nonconst(
- int64_t arg)
+ long long arg)
{
struct fixed31_32 res;
@@ -132,7 +132,7 @@ struct fixed31_32 dal_fixed31_32_from_int_nonconst(
struct fixed31_32 dal_fixed31_32_shl(
struct fixed31_32 arg,
- uint8_t shift)
+ unsigned char shift)
{
struct fixed31_32 res;
@@ -181,16 +181,16 @@ struct fixed31_32 dal_fixed31_32_mul(
bool arg1_negative = arg1.value < 0;
bool arg2_negative = arg2.value < 0;
- uint64_t arg1_value = arg1_negative ? -arg1.value : arg1.value;
- uint64_t arg2_value = arg2_negative ? -arg2.value : arg2.value;
+ unsigned long long arg1_value = arg1_negative ? -arg1.value : arg1.value;
+ unsigned long long arg2_value = arg2_negative ? -arg2.value : arg2.value;
- uint64_t arg1_int = GET_INTEGER_PART(arg1_value);
- uint64_t arg2_int = GET_INTEGER_PART(arg2_value);
+ unsigned long long arg1_int = GET_INTEGER_PART(arg1_value);
+ unsigned long long arg2_int = GET_INTEGER_PART(arg2_value);
- uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value);
- uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value);
+ unsigned long long arg1_fra = GET_FRACTIONAL_PART(arg1_value);
+ unsigned long long arg2_fra = GET_FRACTIONAL_PART(arg2_value);
- uint64_t tmp;
+ unsigned long long tmp;
res.value = arg1_int * arg2_int;
@@ -200,22 +200,22 @@ struct fixed31_32 dal_fixed31_32_mul(
tmp = arg1_int * arg2_fra;
- ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
tmp = arg2_int * arg1_fra;
- ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
tmp = arg1_fra * arg2_fra;
tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
- (tmp >= (uint64_t)dal_fixed31_32_half.value);
+ (tmp >= (unsigned long long)dal_fixed31_32_half.value);
- ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
@@ -230,13 +230,13 @@ struct fixed31_32 dal_fixed31_32_sqr(
{
struct fixed31_32 res;
- uint64_t arg_value = abs_i64(arg.value);
+ unsigned long long arg_value = abs_i64(arg.value);
- uint64_t arg_int = GET_INTEGER_PART(arg_value);
+ unsigned long long arg_int = GET_INTEGER_PART(arg_value);
- uint64_t arg_fra = GET_FRACTIONAL_PART(arg_value);
+ unsigned long long arg_fra = GET_FRACTIONAL_PART(arg_value);
- uint64_t tmp;
+ unsigned long long tmp;
res.value = arg_int * arg_int;
@@ -246,20 +246,20 @@ struct fixed31_32 dal_fixed31_32_sqr(
tmp = arg_int * arg_fra;
- ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
- ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
tmp = arg_fra * arg_fra;
tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
- (tmp >= (uint64_t)dal_fixed31_32_half.value);
+ (tmp >= (unsigned long long)dal_fixed31_32_half.value);
- ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
res.value += tmp;
@@ -288,7 +288,7 @@ struct fixed31_32 dal_fixed31_32_sinc(
struct fixed31_32 res = dal_fixed31_32_one;
- int32_t n = 27;
+ int n = 27;
struct fixed31_32 arg_norm = arg;
@@ -299,7 +299,7 @@ struct fixed31_32 dal_fixed31_32_sinc(
arg_norm,
dal_fixed31_32_mul_int(
dal_fixed31_32_two_pi,
- (int32_t)div64_s64(
+ (int)div64_s64(
arg_norm.value,
dal_fixed31_32_two_pi.value)));
}
@@ -343,7 +343,7 @@ struct fixed31_32 dal_fixed31_32_cos(
struct fixed31_32 res = dal_fixed31_32_one;
- int32_t n = 26;
+ int n = 26;
do {
res = dal_fixed31_32_sub(
@@ -370,7 +370,7 @@ struct fixed31_32 dal_fixed31_32_cos(
static struct fixed31_32 fixed31_32_exp_from_taylor_series(
struct fixed31_32 arg)
{
- uint32_t n = 9;
+ unsigned int n = 9;
struct fixed31_32 res = dal_fixed31_32_from_fraction(
n + 2,
@@ -409,7 +409,7 @@ struct fixed31_32 dal_fixed31_32_exp(
if (dal_fixed31_32_le(
dal_fixed31_32_ln2_div_2,
dal_fixed31_32_abs(arg))) {
- int32_t m = dal_fixed31_32_round(
+ int m = dal_fixed31_32_round(
dal_fixed31_32_div(
arg,
dal_fixed31_32_ln2));
@@ -429,7 +429,7 @@ struct fixed31_32 dal_fixed31_32_exp(
if (m > 0)
return dal_fixed31_32_shl(
fixed31_32_exp_from_taylor_series(r),
- (uint8_t)m);
+ (unsigned char)m);
else
return dal_fixed31_32_div_int(
fixed31_32_exp_from_taylor_series(r),
@@ -482,50 +482,50 @@ struct fixed31_32 dal_fixed31_32_pow(
arg2));
}
-int32_t dal_fixed31_32_floor(
+int dal_fixed31_32_floor(
struct fixed31_32 arg)
{
- uint64_t arg_value = abs_i64(arg.value);
+ unsigned long long arg_value = abs_i64(arg.value);
if (arg.value >= 0)
- return (int32_t)GET_INTEGER_PART(arg_value);
+ return (int)GET_INTEGER_PART(arg_value);
else
- return -(int32_t)GET_INTEGER_PART(arg_value);
+ return -(int)GET_INTEGER_PART(arg_value);
}
-int32_t dal_fixed31_32_round(
+int dal_fixed31_32_round(
struct fixed31_32 arg)
{
- uint64_t arg_value = abs_i64(arg.value);
+ unsigned long long arg_value = abs_i64(arg.value);
- const int64_t summand = dal_fixed31_32_half.value;
+ const long long summand = dal_fixed31_32_half.value;
- ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
+ ASSERT(LLONG_MAX - (long long)arg_value >= summand);
arg_value += summand;
if (arg.value >= 0)
- return (int32_t)GET_INTEGER_PART(arg_value);
+ return (int)GET_INTEGER_PART(arg_value);
else
- return -(int32_t)GET_INTEGER_PART(arg_value);
+ return -(int)GET_INTEGER_PART(arg_value);
}
-int32_t dal_fixed31_32_ceil(
+int dal_fixed31_32_ceil(
struct fixed31_32 arg)
{
- uint64_t arg_value = abs_i64(arg.value);
+ unsigned long long arg_value = abs_i64(arg.value);
- const int64_t summand = dal_fixed31_32_one.value -
+ const long long summand = dal_fixed31_32_one.value -
dal_fixed31_32_epsilon.value;
- ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
+ ASSERT(LLONG_MAX - (long long)arg_value >= summand);
arg_value += summand;
if (arg.value >= 0)
- return (int32_t)GET_INTEGER_PART(arg_value);
+ return (int)GET_INTEGER_PART(arg_value);
else
- return -(int32_t)GET_INTEGER_PART(arg_value);
+ return -(int)GET_INTEGER_PART(arg_value);
}
/* this function is a generic helper to translate fixed point value to
@@ -535,15 +535,15 @@ int32_t dal_fixed31_32_ceil(
* part in 32 bits. It is used in hw programming (scaler)
*/
-static inline uint32_t ux_dy(
- int64_t value,
- uint32_t integer_bits,
- uint32_t fractional_bits)
+static inline unsigned int ux_dy(
+ long long value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits)
{
/* 1. create mask of integer part */
- uint32_t result = (1 << integer_bits) - 1;
+ unsigned int result = (1 << integer_bits) - 1;
/* 2. mask out fractional part */
- uint32_t fractional_part = FRACTIONAL_PART_MASK & value;
+ unsigned int fractional_part = FRACTIONAL_PART_MASK & value;
/* 3. shrink fixed point integer part to be of integer_bits width*/
result &= GET_INTEGER_PART(value);
/* 4. make space for fractional part to be filled in after integer */
@@ -554,13 +554,13 @@ static inline uint32_t ux_dy(
return result | fractional_part;
}
-static inline uint32_t clamp_ux_dy(
- int64_t value,
- uint32_t integer_bits,
- uint32_t fractional_bits,
- uint32_t min_clamp)
+static inline unsigned int clamp_ux_dy(
+ long long value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits,
+ unsigned int min_clamp)
{
- uint32_t truncated_val = ux_dy(value, integer_bits, fractional_bits);
+ unsigned int truncated_val = ux_dy(value, integer_bits, fractional_bits);
if (value >= (1LL << (integer_bits + FIXED31_32_BITS_PER_FRACTIONAL_PART)))
return (1 << (integer_bits + fractional_bits)) - 1;
@@ -570,35 +570,35 @@ static inline uint32_t clamp_ux_dy(
return min_clamp;
}
-uint32_t dal_fixed31_32_u2d19(
+unsigned int dal_fixed31_32_u2d19(
struct fixed31_32 arg)
{
return ux_dy(arg.value, 2, 19);
}
-uint32_t dal_fixed31_32_u0d19(
+unsigned int dal_fixed31_32_u0d19(
struct fixed31_32 arg)
{
return ux_dy(arg.value, 0, 19);
}
-uint32_t dal_fixed31_32_clamp_u0d14(
+unsigned int dal_fixed31_32_clamp_u0d14(
struct fixed31_32 arg)
{
return clamp_ux_dy(arg.value, 0, 14, 1);
}
-uint32_t dal_fixed31_32_clamp_u0d10(
+unsigned int dal_fixed31_32_clamp_u0d10(
struct fixed31_32 arg)
{
return clamp_ux_dy(arg.value, 0, 10, 1);
}
-int32_t dal_fixed31_32_s4d19(
+int dal_fixed31_32_s4d19(
struct fixed31_32 arg)
{
if (arg.value < 0)
- return -(int32_t)ux_dy(dal_fixed31_32_abs(arg).value, 4, 19);
+ return -(int)ux_dy(dal_fixed31_32_abs(arg).value, 4, 19);
else
return ux_dy(arg.value, 4, 19);
}
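ux_dy() repacks a Q31.32 value into a uX.Y hardware register field: mask the integer part down to integer_bits, then append the most significant fractional_bits of the 32-bit fraction. Worked example for the u2d19 format used by the scaler:

	/* arg = 1.5  ->  arg.value = 0x0000000180000000 (Q31.32)
	 * ux_dy(arg.value, 2, 19):
	 *   integer part (1) masked to 2 bits        -> 0x1
	 *   shifted left by 19 fractional bits       -> 0x80000
	 *   top 19 fraction bits: 0x80000000 >> 13   -> 0x40000
	 *   result 0x80000 | 0x40000 = 0xC0000 = 1.5 * 2^19
	 */
	unsigned int field = dal_fixed31_32_u2d19(
			dal_fixed31_32_from_fraction(3, 2));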
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 985fe8c..10a5807 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -70,6 +70,10 @@ static enum bp_result get_firmware_info_v3_1(
struct bios_parser *bp,
struct dc_firmware_info *info);
+static enum bp_result get_firmware_info_v3_2(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info);
+
static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp,
struct atom_display_object_path_v2 *object);
@@ -1321,9 +1325,11 @@ static enum bp_result bios_parser_get_firmware_info(
case 3:
switch (revision.minor) {
case 1:
- case 2:
result = get_firmware_info_v3_1(bp, info);
break;
+ case 2:
+ result = get_firmware_info_v3_2(bp, info);
+ break;
default:
break;
}
@@ -1383,6 +1389,84 @@ static enum bp_result get_firmware_info_v3_1(
return BP_RESULT_OK;
}
+static enum bp_result get_firmware_info_v3_2(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info)
+{
+ struct atom_firmware_info_v3_2 *firmware_info;
+ struct atom_display_controller_info_v4_1 *dce_info = NULL;
+ struct atom_common_table_header *header;
+ struct atom_data_revision revision;
+ struct atom_smu_info_v3_2 *smu_info_v3_2 = NULL;
+ struct atom_smu_info_v3_3 *smu_info_v3_3 = NULL;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ firmware_info = GET_IMAGE(struct atom_firmware_info_v3_2,
+ DATA_TABLES(firmwareinfo));
+
+ dce_info = GET_IMAGE(struct atom_display_controller_info_v4_1,
+ DATA_TABLES(dce_info));
+
+ if (!firmware_info || !dce_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ memset(info, 0, sizeof(*info));
+
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(smu_info));
+ get_atom_data_table_revision(header, &revision);
+
+ if (revision.minor == 2) {
+ /* Vega12 */
+ smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
+ DATA_TABLES(smu_info));
+
+ if (!smu_info_v3_2)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10;
+ } else if (revision.minor == 3) {
+ /* Vega20 */
+ smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
+ DATA_TABLES(smu_info));
+
+ if (!smu_info_v3_3)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10;
+ }
+
+	/* We need to convert from 10 kHz units into kHz units. */
+ info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10;
+
+ /* 27MHz for Vega10 & Vega12; 100MHz for Vega20 */
+ info->pll_info.crystal_frequency = dce_info->dce_refclk_10khz * 10;
+ /* Hardcode frequency if BIOS gives no DCE Ref Clk */
+ if (info->pll_info.crystal_frequency == 0) {
+ if (revision.minor == 2)
+ info->pll_info.crystal_frequency = 27000;
+ else if (revision.minor == 3)
+ info->pll_info.crystal_frequency = 100000;
+ }
+	/* dp_phy_ref_clk is not correct for atom_display_controller_info_v4_2, but we don't use it */
+ info->dp_phy_ref_clk = dce_info->dpphy_refclk_10khz * 10;
+ info->i2c_engine_ref_clk = dce_info->i2c_engine_refclk_10khz * 10;
+
+ /* Get GPU PLL VCO Clock */
+ if (bp->cmd_tbl.get_smu_clock_info != NULL) {
+ if (revision.minor == 2)
+ info->smu_gpu_pll_output_freq =
+ bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;
+ else if (revision.minor == 3)
+ info->smu_gpu_pll_output_freq =
+ bp->cmd_tbl.get_smu_clock_info(bp, SMU11_SYSPLL3_0_ID) * 10;
+ }
+
+ return BP_RESULT_OK;
+}
+
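The tables parsed here store every clock in 10 kHz units, hence the recurring multiply-by-ten when filling dc_firmware_info, which the driver keeps in kHz; revision.minor selects the SMU-info layout (2 for Vega12, 3 for Vega20). The unit convention as a one-liner (hypothetical helper, not part of the patch):

	static inline uint32_t atom_10khz_to_khz(uint32_t clk_10khz)
	{
		return clk_10khz * 10;
	}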
static enum bp_result bios_parser_get_encoder_cap_info(
struct dc_bios *dcb,
struct graphics_object_id object_id,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 4b5fdd5..651e1fd 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -24,7 +24,7 @@
*/
#include "dm_services.h"
-
+#include "amdgpu.h"
#include "atom.h"
#include "include/bios_parser_interface.h"
@@ -35,16 +35,16 @@
#include "bios_parser_types_internal.h"
#define EXEC_BIOS_CMD_TABLE(command, params)\
- (cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \
+ (amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
GetIndexIntoMasterTable(COMMAND, command), \
- &params) == 0)
+ (uint32_t *)&params) == 0)
#define BIOS_CMD_TABLE_REVISION(command, frev, crev)\
- cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \
+ amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
GetIndexIntoMasterTable(COMMAND, command), &frev, &crev)
#define BIOS_CMD_TABLE_PARA_REVISION(command)\
- bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \
+ bios_cmd_table_para_revision(bp->base.ctx->driver_context, \
GetIndexIntoMasterTable(COMMAND, command))
static void init_dig_encoder_control(struct bios_parser *bp);
@@ -82,16 +82,18 @@ void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp)
init_set_dce_clock(bp);
}
-static uint32_t bios_cmd_table_para_revision(void *cgs_device,
+static uint32_t bios_cmd_table_para_revision(void *dev,
uint32_t index)
{
+ struct amdgpu_device *adev = dev;
uint8_t frev, crev;
- if (cgs_atom_get_cmd_table_revs(cgs_device,
+ if (amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context,
index,
- &frev, &crev) != 0)
+ &frev, &crev))
+ return crev;
+ else
return 0;
- return crev;
}
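Both command-table files drop the CGS indirection and call the atombios interpreter directly through the amdgpu_device reachable from ctx->driver_context. Note the inverted success convention: cgs_atom_get_cmd_table_revs() returned 0 on success, while amdgpu_atom_parse_cmd_header() returns true, which is why the revision checks flip from "!= 0" to "== false". Expanded, the new EXEC_BIOS_CMD_TABLE(command, params) is roughly:

	struct amdgpu_device *adev = bp->base.ctx->driver_context;
	bool ok = amdgpu_atom_execute_table(adev->mode_info.atom_context,
					    GetIndexIntoMasterTable(COMMAND, command),
					    (uint32_t *)&params) == 0;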
/*******************************************************************************
@@ -368,7 +370,7 @@ static void init_transmitter_control(struct bios_parser *bp)
uint8_t crev;
if (BIOS_CMD_TABLE_REVISION(UNIPHYTransmitterControl,
- frev, crev) != 0)
+ frev, crev) == false)
BREAK_TO_DEBUGGER();
switch (crev) {
case 2:
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 3f63f71..752b08a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -26,14 +26,18 @@
#include "dm_services.h"
#include "ObjectID.h"
-#include "atomfirmware.h"
+#include "atomfirmware.h"
+#include "atom.h"
#include "include/bios_parser_interface.h"
#include "command_table2.h"
#include "command_table_helper2.h"
#include "bios_parser_helper.h"
#include "bios_parser_types_internal2.h"
+#include "amdgpu.h"
+
+
#define DC_LOGGER \
bp->base.ctx->logger
@@ -43,16 +47,16 @@
->FieldName)-(char *)0)/sizeof(uint16_t))
#define EXEC_BIOS_CMD_TABLE(fname, params)\
- (cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \
+ (amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
GET_INDEX_INTO_MASTER_TABLE(command, fname), \
- &params) == 0)
+ (uint32_t *)&params) == 0)
#define BIOS_CMD_TABLE_REVISION(fname, frev, crev)\
- cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \
+ amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
GET_INDEX_INTO_MASTER_TABLE(command, fname), &frev, &crev)
#define BIOS_CMD_TABLE_PARA_REVISION(fname)\
- bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \
+ bios_cmd_table_para_revision(bp->base.ctx->driver_context, \
GET_INDEX_INTO_MASTER_TABLE(command, fname))
static void init_dig_encoder_control(struct bios_parser *bp);
@@ -86,16 +90,18 @@ void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
init_get_smu_clock_info(bp);
}
-static uint32_t bios_cmd_table_para_revision(void *cgs_device,
+static uint32_t bios_cmd_table_para_revision(void *dev,
uint32_t index)
{
+ struct amdgpu_device *adev = dev;
uint8_t frev, crev;
- if (cgs_atom_get_cmd_table_revs(cgs_device,
+ if (amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context,
index,
- &frev, &crev) != 0)
+ &frev, &crev))
+ return crev;
+ else
return 0;
- return crev;
}
/******************************************************************************
@@ -201,7 +207,7 @@ static void init_transmitter_control(struct bios_parser *bp)
uint8_t frev;
uint8_t crev;
- if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) != 0)
+ if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) == false)
BREAK_TO_DEBUGGER();
switch (crev) {
case 6:
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
index 2979358..be066c4 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
@@ -51,6 +51,9 @@ bool dal_bios_parser_init_cmd_tbl_helper(
return true;
case DCE_VERSION_11_2:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case DCE_VERSION_11_22:
+#endif
*h = dal_cmd_tbl_helper_dce112_get_table();
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 9a4d30d..9b9e069 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -52,6 +52,9 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
return true;
case DCE_VERSION_11_2:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case DCE_VERSION_11_22:
+#endif
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h b/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
new file mode 100644
index 0000000..fc3f98f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
@@ -0,0 +1,579 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _CALCS_CALCS_LOGGER_H_
+#define _CALCS_CALCS_LOGGER_H_
+#define DC_LOGGER \
+ logger
+
+static void print_bw_calcs_dceip(struct dal_logger *logger, const struct bw_calcs_dceip *dceip)
+{
+
+ DC_LOG_BANDWIDTH_CALCS("#####################################################################");
+ DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_dceip");
+ DC_LOG_BANDWIDTH_CALCS("#####################################################################");
+ DC_LOG_BANDWIDTH_CALCS(" [enum] bw_calcs_version version %d", dceip->version);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] large_cursor: %d", dceip->large_cursor);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] dmif_pipe_en_fbc_chunk_tracker: %d", dceip->dmif_pipe_en_fbc_chunk_tracker);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] display_write_back_supported: %d", dceip->display_write_back_supported);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] argb_compression_support: %d", dceip->argb_compression_support);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] pre_downscaler_enabled: %d", dceip->pre_downscaler_enabled);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] underlay_downscale_prefetch_enabled: %d",
+ dceip->underlay_downscale_prefetch_enabled);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] graphics_lb_nodownscaling_multi_line_prefetching: %d",
+ dceip->graphics_lb_nodownscaling_multi_line_prefetching);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] limit_excessive_outstanding_dmif_requests: %d",
+ dceip->limit_excessive_outstanding_dmif_requests);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] cursor_max_outstanding_group_num: %d",
+ dceip->cursor_max_outstanding_group_num);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] lines_interleaved_into_lb: %d", dceip->lines_interleaved_into_lb);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] low_power_tiling_mode: %d", dceip->low_power_tiling_mode);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] chunk_width: %d", dceip->chunk_width);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_graphics_pipes: %d", dceip->number_of_graphics_pipes);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_underlay_pipes: %d", dceip->number_of_underlay_pipes);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] max_dmif_buffer_allocated: %d", dceip->max_dmif_buffer_allocated);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] graphics_dmif_size: %d", dceip->graphics_dmif_size);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_luma_dmif_size: %d", dceip->underlay_luma_dmif_size);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_chroma_dmif_size: %d", dceip->underlay_chroma_dmif_size);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] scatter_gather_lines_of_pte_prefetching_in_linear_mode: %d",
+ dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] display_write_back420_luma_mcifwr_buffer_size: %d",
+ dceip->display_write_back420_luma_mcifwr_buffer_size);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] display_write_back420_chroma_mcifwr_buffer_size: %d",
+ dceip->display_write_back420_chroma_mcifwr_buffer_size);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] scatter_gather_pte_request_rows_in_tiling_mode: %d",
+ dceip->scatter_gather_pte_request_rows_in_tiling_mode);
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_vscaler_efficiency10_bit_per_component: %d",
+ bw_fixed_to_int(dceip->underlay_vscaler_efficiency10_bit_per_component));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_vscaler_efficiency12_bit_per_component: %d",
+ bw_fixed_to_int(dceip->underlay_vscaler_efficiency12_bit_per_component));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency6_bit_per_component: %d",
+ bw_fixed_to_int(dceip->graphics_vscaler_efficiency6_bit_per_component));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency8_bit_per_component: %d",
+ bw_fixed_to_int(dceip->graphics_vscaler_efficiency8_bit_per_component));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency10_bit_per_component: %d",
+ bw_fixed_to_int(dceip->graphics_vscaler_efficiency10_bit_per_component));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency12_bit_per_component: %d",
+ bw_fixed_to_int(dceip->graphics_vscaler_efficiency12_bit_per_component));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] alpha_vscaler_efficiency: %d",
+ bw_fixed_to_int(dceip->alpha_vscaler_efficiency));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_write_pixels_per_dispclk: %d",
+ bw_fixed_to_int(dceip->lb_write_pixels_per_dispclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_size_per_component444: %d",
+ bw_fixed_to_int(dceip->lb_size_per_component444));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_and_dram_clock_state_change_gated_before_cursor: %d",
+ bw_fixed_to_int(dceip->stutter_and_dram_clock_state_change_gated_before_cursor));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay420_luma_lb_size_per_component: %d",
+ bw_fixed_to_int(dceip->underlay420_luma_lb_size_per_component));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay420_chroma_lb_size_per_component: %d",
+ bw_fixed_to_int(dceip->underlay420_chroma_lb_size_per_component));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay422_lb_size_per_component: %d",
+ bw_fixed_to_int(dceip->underlay422_lb_size_per_component));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_chunk_width: %d", bw_fixed_to_int(dceip->cursor_chunk_width));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_dcp_buffer_lines: %d",
+ bw_fixed_to_int(dceip->cursor_dcp_buffer_lines));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_width_efficient_for_tiling: %d",
+ bw_fixed_to_int(dceip->underlay_maximum_width_efficient_for_tiling));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_height_efficient_for_tiling: %d",
+ bw_fixed_to_int(dceip->underlay_maximum_height_efficient_for_tiling));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display: %d",
+ bw_fixed_to_int(dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation: %d",
+ bw_fixed_to_int(dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_outstanding_pte_request_limit: %d",
+ bw_fixed_to_int(dceip->minimum_outstanding_pte_request_limit));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_total_outstanding_pte_requests_allowed_by_saw: %d",
+ bw_fixed_to_int(dceip->maximum_total_outstanding_pte_requests_allowed_by_saw));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] linear_mode_line_request_alternation_slice: %d",
+ bw_fixed_to_int(dceip->linear_mode_line_request_alternation_slice));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] request_efficiency: %d", bw_fixed_to_int(dceip->request_efficiency));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_per_request: %d", bw_fixed_to_int(dceip->dispclk_per_request));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_ramping_factor: %d",
+ bw_fixed_to_int(dceip->dispclk_ramping_factor));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_pipe_throughput_factor: %d",
+ bw_fixed_to_int(dceip->display_pipe_throughput_factor));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_all_surfaces_burst_time: %d",
+ bw_fixed_to_int(dceip->mcifwr_all_surfaces_burst_time));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_request_buffer_size: %d",
+ bw_fixed_to_int(dceip->dmif_request_buffer_size));
+}
+
+static void print_bw_calcs_vbios(struct dal_logger *logger, const struct bw_calcs_vbios *vbios)
+{
+ DC_LOG_BANDWIDTH_CALCS("#####################################################################");
+ DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_vbios vbios");
+ DC_LOG_BANDWIDTH_CALCS("#####################################################################");
+ DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines memory_type: %d", vbios->memory_type);
+ DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines memory_type: %d", vbios->memory_type);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] dram_channel_width_in_bits: %d", vbios->dram_channel_width_in_bits);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_channels: %d", vbios->number_of_dram_channels);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_banks: %d", vbios->number_of_dram_banks);
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_yclk: %d", bw_fixed_to_int(vbios->low_yclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_yclk: %d", bw_fixed_to_int(vbios->mid_yclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_yclk: %d", bw_fixed_to_int(vbios->high_yclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_sclk: %d", bw_fixed_to_int(vbios->low_sclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid1_sclk: %d", bw_fixed_to_int(vbios->mid1_sclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid2_sclk: %d", bw_fixed_to_int(vbios->mid2_sclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid3_sclk: %d", bw_fixed_to_int(vbios->mid3_sclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid4_sclk: %d", bw_fixed_to_int(vbios->mid4_sclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid5_sclk: %d", bw_fixed_to_int(vbios->mid5_sclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid6_sclk: %d", bw_fixed_to_int(vbios->mid6_sclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_sclk: %d", bw_fixed_to_int(vbios->high_sclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_voltage_max_dispclk: %d",
+ bw_fixed_to_int(vbios->low_voltage_max_dispclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_voltage_max_dispclk;: %d",
+ bw_fixed_to_int(vbios->mid_voltage_max_dispclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_voltage_max_dispclk;: %d",
+ bw_fixed_to_int(vbios->high_voltage_max_dispclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_voltage_max_phyclk: %d",
+ bw_fixed_to_int(vbios->low_voltage_max_phyclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_voltage_max_phyclk: %d",
+ bw_fixed_to_int(vbios->mid_voltage_max_phyclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_voltage_max_phyclk: %d",
+ bw_fixed_to_int(vbios->high_voltage_max_phyclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] data_return_bus_width: %d", bw_fixed_to_int(vbios->data_return_bus_width));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] trc: %d", bw_fixed_to_int(vbios->trc));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifmc_urgent_latency: %d", bw_fixed_to_int(vbios->dmifmc_urgent_latency));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_self_refresh_exit_latency: %d",
+ bw_fixed_to_int(vbios->stutter_self_refresh_exit_latency));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_self_refresh_entry_latency: %d",
+ bw_fixed_to_int(vbios->stutter_self_refresh_entry_latency));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_change_latency: %d",
+ bw_fixed_to_int(vbios->nbp_state_change_latency));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwrmc_urgent_latency: %d",
+ bw_fixed_to_int(vbios->mcifwrmc_urgent_latency));
+ DC_LOG_BANDWIDTH_CALCS(" [bool] scatter_gather_enable: %d", vbios->scatter_gather_enable);
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] down_spread_percentage: %d",
+ bw_fixed_to_int(vbios->down_spread_percentage));
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] cursor_width: %d", vbios->cursor_width);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] average_compression_rate: %d", vbios->average_compression_rate);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_request_slots_gmc_reserves_for_dmif_per_channel: %d",
+ vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel);
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_duration: %d", bw_fixed_to_int(vbios->blackout_duration));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_blackout_recovery_time: %d",
+ bw_fixed_to_int(vbios->maximum_blackout_recovery_time));
+}
+
+static void print_bw_calcs_data(struct dal_logger *logger, struct bw_calcs_data *data)
+{
+ int i, j, k;
+
+ DC_LOG_BANDWIDTH_CALCS("#####################################################################");
+ DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_data data");
+ DC_LOG_BANDWIDTH_CALCS("#####################################################################");
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_displays: %d", data->number_of_displays);
+ DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_surface_type: %d", data->underlay_surface_type);
+ DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines panning_and_bezel_adjustment: %d",
+ data->panning_and_bezel_adjustment);
+ DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines graphics_tiling_mode: %d", data->graphics_tiling_mode);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] graphics_lb_bpc: %d", data->graphics_lb_bpc);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_lb_bpc: %d", data->underlay_lb_bpc);
+ DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_tiling_mode: %d", data->underlay_tiling_mode);
+ DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines d0_underlay_mode: %d", data->d0_underlay_mode);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] d1_display_write_back_dwb_enable: %d", data->d1_display_write_back_dwb_enable);
+ DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines d1_underlay_mode: %d", data->d1_underlay_mode);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] cpup_state_change_enable: %d", data->cpup_state_change_enable);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] cpuc_state_change_enable: %d", data->cpuc_state_change_enable);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] nbp_state_change_enable: %d", data->nbp_state_change_enable);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] stutter_mode_enable: %d", data->stutter_mode_enable);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] y_clk_level: %d", data->y_clk_level);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] sclk_level: %d", data->sclk_level);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_underlay_surfaces: %d", data->number_of_underlay_surfaces);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_wrchannels: %d", data->number_of_dram_wrchannels);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] chunk_request_delay: %d", data->chunk_request_delay);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_channels: %d", data->number_of_dram_channels);
+ DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_micro_tile_mode: %d", data->underlay_micro_tile_mode);
+ DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines graphics_micro_tile_mode: %d", data->graphics_micro_tile_mode);
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] max_phyclk: %d", bw_fixed_to_int(data->max_phyclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_efficiency: %d", bw_fixed_to_int(data->dram_efficiency));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width_after_surface_type: %d",
+ bw_fixed_to_int(data->src_width_after_surface_type));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height_after_surface_type: %d",
+ bw_fixed_to_int(data->src_height_after_surface_type));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_surface_type: %d",
+ bw_fixed_to_int(data->hsr_after_surface_type));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_surface_type: %d", bw_fixed_to_int(data->vsr_after_surface_type));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width_after_rotation: %d",
+ bw_fixed_to_int(data->src_width_after_rotation));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height_after_rotation: %d",
+ bw_fixed_to_int(data->src_height_after_rotation));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_rotation: %d", bw_fixed_to_int(data->hsr_after_rotation));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_rotation: %d", bw_fixed_to_int(data->vsr_after_rotation));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_height_pixels: %d", bw_fixed_to_int(data->source_height_pixels));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_stereo: %d", bw_fixed_to_int(data->hsr_after_stereo));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_stereo: %d", bw_fixed_to_int(data->vsr_after_stereo));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_in_lb: %d", bw_fixed_to_int(data->source_width_in_lb));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_line_pitch: %d", bw_fixed_to_int(data->lb_line_pitch));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_source_efficient_for_tiling: %d",
+ bw_fixed_to_int(data->underlay_maximum_source_efficient_for_tiling));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] num_lines_at_frame_start: %d",
+ bw_fixed_to_int(data->num_lines_at_frame_start));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_dmif_size_in_time: %d", bw_fixed_to_int(data->min_dmif_size_in_time));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_mcifwr_size_in_time: %d",
+ bw_fixed_to_int(data->min_mcifwr_size_in_time));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_requests_for_dmif_size: %d",
+ bw_fixed_to_int(data->total_requests_for_dmif_size));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting: %d",
+ bw_fixed_to_int(data->peak_pte_request_to_eviction_ratio_limiting));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] useful_pte_per_pte_request: %d",
+ bw_fixed_to_int(data->useful_pte_per_pte_request));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_request_rows: %d",
+ bw_fixed_to_int(data->scatter_gather_pte_request_rows));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_row_height: %d",
+ bw_fixed_to_int(data->scatter_gather_row_height));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_requests_in_vblank: %d",
+ bw_fixed_to_int(data->scatter_gather_pte_requests_in_vblank));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] inefficient_linear_pitch_in_bytes: %d",
+ bw_fixed_to_int(data->inefficient_linear_pitch_in_bytes));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_total_data: %d", bw_fixed_to_int(data->cursor_total_data));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_total_request_groups: %d",
+ bw_fixed_to_int(data->cursor_total_request_groups));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_total_pte_requests: %d",
+ bw_fixed_to_int(data->scatter_gather_total_pte_requests));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_total_pte_request_groups: %d",
+ bw_fixed_to_int(data->scatter_gather_total_pte_request_groups));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] tile_width_in_pixels: %d", bw_fixed_to_int(data->tile_width_in_pixels));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_total_number_of_data_request_page_close_open: %d",
+ bw_fixed_to_int(data->dmif_total_number_of_data_request_page_close_open));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_total_number_of_data_request_page_close_open: %d",
+ bw_fixed_to_int(data->mcifwr_total_number_of_data_request_page_close_open));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] bytes_per_page_close_open: %d",
+ bw_fixed_to_int(data->bytes_per_page_close_open));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_total_page_close_open_time: %d",
+ bw_fixed_to_int(data->mcifwr_total_page_close_open_time));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_requests_for_adjusted_dmif_size: %d",
+ bw_fixed_to_int(data->total_requests_for_adjusted_dmif_size));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dmifmc_urgent_trips: %d",
+ bw_fixed_to_int(data->total_dmifmc_urgent_trips));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dmifmc_urgent_latency: %d",
+ bw_fixed_to_int(data->total_dmifmc_urgent_latency));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_reads_required_data: %d",
+ bw_fixed_to_int(data->total_display_reads_required_data));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_reads_required_dram_access_data: %d",
+ bw_fixed_to_int(data->total_display_reads_required_dram_access_data));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_writes_required_data: %d",
+ bw_fixed_to_int(data->total_display_writes_required_data));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_writes_required_dram_access_data: %d",
+ bw_fixed_to_int(data->total_display_writes_required_dram_access_data));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_required_data: %d",
+ bw_fixed_to_int(data->display_reads_required_data));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_required_dram_access_data: %d",
+ bw_fixed_to_int(data->display_reads_required_dram_access_data));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_total_page_close_open_time: %d",
+ bw_fixed_to_int(data->dmif_total_page_close_open_time));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_cursor_memory_interface_buffer_size_in_time: %d",
+ bw_fixed_to_int(data->min_cursor_memory_interface_buffer_size_in_time));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_read_buffer_size_in_time: %d",
+ bw_fixed_to_int(data->min_read_buffer_size_in_time));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_time_for_data_transfer: %d",
+ bw_fixed_to_int(data->display_reads_time_for_data_transfer));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_writes_time_for_data_transfer: %d",
+ bw_fixed_to_int(data->display_writes_time_for_data_transfer));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_required_dram_bandwidth: %d",
+ bw_fixed_to_int(data->dmif_required_dram_bandwidth));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_required_dram_bandwidth: %d",
+ bw_fixed_to_int(data->mcifwr_required_dram_bandwidth));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_dmifmc_urgent_latency_for_page_close_open: %d",
+ bw_fixed_to_int(data->required_dmifmc_urgent_latency_for_page_close_open));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_mcifmcwr_urgent_latency: %d",
+ bw_fixed_to_int(data->required_mcifmcwr_urgent_latency));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_dram_bandwidth_gbyte_per_second: %d",
+ bw_fixed_to_int(data->required_dram_bandwidth_gbyte_per_second));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_bandwidth: %d", bw_fixed_to_int(data->dram_bandwidth));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_required_sclk: %d", bw_fixed_to_int(data->dmif_required_sclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_required_sclk: %d", bw_fixed_to_int(data->mcifwr_required_sclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_sclk: %d", bw_fixed_to_int(data->required_sclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] downspread_factor: %d", bw_fixed_to_int(data->downspread_factor));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_scaler_efficiency: %d", bw_fixed_to_int(data->v_scaler_efficiency));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scaler_limits_factor: %d", bw_fixed_to_int(data->scaler_limits_factor));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_pipe_pixel_throughput: %d",
+ bw_fixed_to_int(data->display_pipe_pixel_throughput));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_with_ramping: %d",
+ bw_fixed_to_int(data->total_dispclk_required_with_ramping));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_without_ramping: %d",
+ bw_fixed_to_int(data->total_dispclk_required_without_ramping));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_read_request_bandwidth: %d",
+ bw_fixed_to_int(data->total_read_request_bandwidth));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_write_request_bandwidth: %d",
+ bw_fixed_to_int(data->total_write_request_bandwidth));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_total_read_request_bandwidth: %d",
+ bw_fixed_to_int(data->dispclk_required_for_total_read_request_bandwidth));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_with_ramping_with_request_bandwidth: %d",
+ bw_fixed_to_int(data->total_dispclk_required_with_ramping_with_request_bandwidth));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_without_ramping_with_request_bandwidth: %d",
+ bw_fixed_to_int(data->total_dispclk_required_without_ramping_with_request_bandwidth));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk: %d", bw_fixed_to_int(data->dispclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_recovery_time: %d", bw_fixed_to_int(data->blackout_recovery_time));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_pixels_per_data_fifo_entry: %d",
+ bw_fixed_to_int(data->min_pixels_per_data_fifo_entry));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] sclk_deep_sleep: %d", bw_fixed_to_int(data->sclk_deep_sleep));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] chunk_request_time: %d", bw_fixed_to_int(data->chunk_request_time));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_request_time: %d", bw_fixed_to_int(data->cursor_request_time));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] line_source_pixels_transfer_time: %d",
+ bw_fixed_to_int(data->line_source_pixels_transfer_time));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifdram_access_efficiency: %d",
+ bw_fixed_to_int(data->dmifdram_access_efficiency));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwrdram_access_efficiency: %d",
+ bw_fixed_to_int(data->mcifwrdram_access_efficiency));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_average_bandwidth_no_compression: %d",
+ bw_fixed_to_int(data->total_average_bandwidth_no_compression));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_average_bandwidth: %d",
+ bw_fixed_to_int(data->total_average_bandwidth));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_stutter_cycle_duration: %d",
+ bw_fixed_to_int(data->total_stutter_cycle_duration));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_burst_time: %d", bw_fixed_to_int(data->stutter_burst_time));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] time_in_self_refresh: %d", bw_fixed_to_int(data->time_in_self_refresh));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_efficiency: %d", bw_fixed_to_int(data->stutter_efficiency));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] worst_number_of_trips_to_memory: %d",
+ bw_fixed_to_int(data->worst_number_of_trips_to_memory));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] immediate_flip_time: %d", bw_fixed_to_int(data->immediate_flip_time));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_for_non_dmif_clients: %d",
+ bw_fixed_to_int(data->latency_for_non_dmif_clients));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_for_non_mcifwr_clients: %d",
+ bw_fixed_to_int(data->latency_for_non_mcifwr_clients));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifmc_urgent_latency_supported_in_high_sclk_and_yclk: %d",
+ bw_fixed_to_int(data->dmifmc_urgent_latency_supported_in_high_sclk_and_yclk));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_dram_speed_change_margin: %d",
+ bw_fixed_to_int(data->nbp_state_dram_speed_change_margin));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_time_for_data_transfer_and_urgent_latency: %d",
+ bw_fixed_to_int(data->display_reads_time_for_data_transfer_and_urgent_latency));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_speed_change_margin: %d",
+ bw_fixed_to_int(data->dram_speed_change_margin));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_vblank_dram_speed_change_margin: %d",
+ bw_fixed_to_int(data->min_vblank_dram_speed_change_margin));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_stutter_refresh_duration: %d",
+ bw_fixed_to_int(data->min_stutter_refresh_duration));
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] total_stutter_dmif_buffer_size: %d", data->total_stutter_dmif_buffer_size);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] total_bytes_requested: %d", data->total_bytes_requested);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] min_stutter_dmif_buffer_size: %d", data->min_stutter_dmif_buffer_size);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] num_stutter_bursts: %d", data->num_stutter_bursts);
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_blank_nbp_state_dram_speed_change_latency_supported: %d",
+ bw_fixed_to_int(data->v_blank_nbp_state_dram_speed_change_latency_supported));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_dram_speed_change_latency_supported: %d",
+ bw_fixed_to_int(data->nbp_state_dram_speed_change_latency_supported));
+
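+	/* Per-surface fields: the same set of values is logged for every surface slot. */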
+ for (i = 0; i < maximum_number_of_surfaces; i++) {
+ DC_LOG_BANDWIDTH_CALCS(" [bool] fbc_en[%d]:%d\n", i, data->fbc_en[i]);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] lpt_en[%d]:%d", i, data->lpt_en[i]);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] displays_match_flag[%d]:%d", i, data->displays_match_flag[i]);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] use_alpha[%d]:%d", i, data->use_alpha[i]);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] orthogonal_rotation[%d]:%d", i, data->orthogonal_rotation[i]);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] enable[%d]:%d", i, data->enable[i]);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] access_one_channel_only[%d]:%d", i, data->access_one_channel_only[i]);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] scatter_gather_enable_for_pipe[%d]:%d",
+ i, data->scatter_gather_enable_for_pipe[i]);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] interlace_mode[%d]:%d",
+ i, data->interlace_mode[i]);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] display_pstate_change_enable[%d]:%d",
+ i, data->display_pstate_change_enable[i]);
+ DC_LOG_BANDWIDTH_CALCS(" [bool] line_buffer_prefetch[%d]:%d", i, data->line_buffer_prefetch[i]);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] bytes_per_pixel[%d]:%d", i, data->bytes_per_pixel[i]);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] max_chunks_non_fbc_mode[%d]:%d",
+ i, data->max_chunks_non_fbc_mode[i]);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] lb_bpc[%d]:%d", i, data->lb_bpc[i]);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bpphdmi[%d]:%d", i, data->output_bpphdmi[i]);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr[%d]:%d", i, data->output_bppdp4_lane_hbr[i]);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr2[%d]:%d",
+ i, data->output_bppdp4_lane_hbr2[i]);
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr3[%d]:%d",
+ i, data->output_bppdp4_lane_hbr3[i]);
+ DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines stereo_mode[%d]:%d", i, data->stereo_mode[i]);
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_buffer_transfer_time[%d]:%d",
+ i, bw_fixed_to_int(data->dmif_buffer_transfer_time[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] displays_with_same_mode[%d]:%d",
+ i, bw_fixed_to_int(data->displays_with_same_mode[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_dmif_buffer_size[%d]:%d",
+ i, bw_fixed_to_int(data->stutter_dmif_buffer_size[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_refresh_duration[%d]:%d",
+ i, bw_fixed_to_int(data->stutter_refresh_duration[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_exit_watermark[%d]:%d",
+ i, bw_fixed_to_int(data->stutter_exit_watermark[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_entry_watermark[%d]:%d",
+ i, bw_fixed_to_int(data->stutter_entry_watermark[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_total[%d]:%d", i, bw_fixed_to_int(data->h_total[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_total[%d]:%d", i, bw_fixed_to_int(data->v_total[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pixel_rate[%d]:%d", i, bw_fixed_to_int(data->pixel_rate[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width[%d]:%d", i, bw_fixed_to_int(data->src_width[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pitch_in_pixels[%d]:%d",
+ i, bw_fixed_to_int(data->pitch_in_pixels[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pitch_in_pixels_after_surface_type[%d]:%d",
+ i, bw_fixed_to_int(data->pitch_in_pixels_after_surface_type[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height[%d]:%d", i, bw_fixed_to_int(data->src_height[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scale_ratio[%d]:%d", i, bw_fixed_to_int(data->scale_ratio[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_taps[%d]:%d", i, bw_fixed_to_int(data->h_taps[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_taps[%d]:%d", i, bw_fixed_to_int(data->v_taps[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_scale_ratio[%d]:%d", i, bw_fixed_to_int(data->h_scale_ratio[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_scale_ratio[%d]:%d", i, bw_fixed_to_int(data->v_scale_ratio[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] rotation_angle[%d]:%d",
+ i, bw_fixed_to_int(data->rotation_angle[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] compression_rate[%d]:%d",
+ i, bw_fixed_to_int(data->compression_rate[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr[%d]:%d", i, bw_fixed_to_int(data->hsr[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr[%d]:%d", i, bw_fixed_to_int(data->vsr[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_rounded_up_to_chunks[%d]:%d",
+ i, bw_fixed_to_int(data->source_width_rounded_up_to_chunks[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_pixels[%d]:%d",
+ i, bw_fixed_to_int(data->source_width_pixels[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_height_rounded_up_to_chunks[%d]:%d",
+ i, bw_fixed_to_int(data->source_height_rounded_up_to_chunks[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_bandwidth[%d]:%d",
+ i, bw_fixed_to_int(data->display_bandwidth[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] request_bandwidth[%d]:%d",
+ i, bw_fixed_to_int(data->request_bandwidth[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] bytes_per_request[%d]:%d",
+ i, bw_fixed_to_int(data->bytes_per_request[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] useful_bytes_per_request[%d]:%d",
+ i, bw_fixed_to_int(data->useful_bytes_per_request[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lines_interleaved_in_mem_access[%d]:%d",
+ i, bw_fixed_to_int(data->lines_interleaved_in_mem_access[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_hiding_lines[%d]:%d",
+ i, bw_fixed_to_int(data->latency_hiding_lines[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_partitions[%d]:%d",
+ i, bw_fixed_to_int(data->lb_partitions[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_partitions_max[%d]:%d",
+ i, bw_fixed_to_int(data->lb_partitions_max[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_with_ramping[%d]:%d",
+ i, bw_fixed_to_int(data->dispclk_required_with_ramping[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_without_ramping[%d]:%d",
+ i, bw_fixed_to_int(data->dispclk_required_without_ramping[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] data_buffer_size[%d]:%d",
+ i, bw_fixed_to_int(data->data_buffer_size[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] outstanding_chunk_request_limit[%d]:%d",
+ i, bw_fixed_to_int(data->outstanding_chunk_request_limit[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] urgent_watermark[%d]:%d",
+ i, bw_fixed_to_int(data->urgent_watermark[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_change_watermark[%d]:%d",
+ i, bw_fixed_to_int(data->nbp_state_change_watermark[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_filter_init[%d]:%d", i, bw_fixed_to_int(data->v_filter_init[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_cycle_duration[%d]:%d",
+ i, bw_fixed_to_int(data->stutter_cycle_duration[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] average_bandwidth[%d]:%d",
+ i, bw_fixed_to_int(data->average_bandwidth[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] average_bandwidth_no_compression[%d]:%d",
+ i, bw_fixed_to_int(data->average_bandwidth_no_compression[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_request_limit[%d]:%d",
+ i, bw_fixed_to_int(data->scatter_gather_pte_request_limit[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_size_per_component[%d]:%d",
+ i, bw_fixed_to_int(data->lb_size_per_component[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] memory_chunk_size_in_bytes[%d]:%d",
+ i, bw_fixed_to_int(data->memory_chunk_size_in_bytes[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pipe_chunk_size_in_bytes[%d]:%d",
+ i, bw_fixed_to_int(data->pipe_chunk_size_in_bytes[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] number_of_trips_to_memory_for_getting_apte_row[%d]:%d",
+ i, bw_fixed_to_int(data->number_of_trips_to_memory_for_getting_apte_row[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] adjusted_data_buffer_size[%d]:%d",
+ i, bw_fixed_to_int(data->adjusted_data_buffer_size[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] adjusted_data_buffer_size_in_memory[%d]:%d",
+ i, bw_fixed_to_int(data->adjusted_data_buffer_size_in_memory[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pixels_per_data_fifo_entry[%d]:%d",
+ i, bw_fixed_to_int(data->pixels_per_data_fifo_entry[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_requests_in_row[%d]:%d",
+ i, bw_fixed_to_int(data->scatter_gather_pte_requests_in_row[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pte_request_per_chunk[%d]:%d",
+ i, bw_fixed_to_int(data->pte_request_per_chunk[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_page_width[%d]:%d",
+ i, bw_fixed_to_int(data->scatter_gather_page_width[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_page_height[%d]:%d",
+ i, bw_fixed_to_int(data->scatter_gather_page_height[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_lines_in_per_line_out_in_beginning_of_frame[%d]:%d",
+ i, bw_fixed_to_int(data->lb_lines_in_per_line_out_in_beginning_of_frame[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_lines_in_per_line_out_in_middle_of_frame[%d]:%d",
+ i, bw_fixed_to_int(data->lb_lines_in_per_line_out_in_middle_of_frame[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_width_pixels[%d]:%d",
+ i, bw_fixed_to_int(data->cursor_width_pixels[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_latency_hiding[%d]:%d",
+ i, bw_fixed_to_int(data->minimum_latency_hiding[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_latency_hiding[%d]:%d",
+ i, bw_fixed_to_int(data->maximum_latency_hiding[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_latency_hiding_with_cursor[%d]:%d",
+ i, bw_fixed_to_int(data->minimum_latency_hiding_with_cursor[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_latency_hiding_with_cursor[%d]:%d",
+ i, bw_fixed_to_int(data->maximum_latency_hiding_with_cursor[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_pixels_for_first_output_pixel[%d]:%d",
+ i, bw_fixed_to_int(data->src_pixels_for_first_output_pixel[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_pixels_for_last_output_pixel[%d]:%d",
+ i, bw_fixed_to_int(data->src_pixels_for_last_output_pixel[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_data_for_first_output_pixel[%d]:%d",
+ i, bw_fixed_to_int(data->src_data_for_first_output_pixel[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_data_for_last_output_pixel[%d]:%d",
+ i, bw_fixed_to_int(data->src_data_for_last_output_pixel[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] active_time[%d]:%d", i, bw_fixed_to_int(data->active_time[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] horizontal_blank_and_chunk_granularity_factor[%d]:%d",
+ i, bw_fixed_to_int(data->horizontal_blank_and_chunk_granularity_factor[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_latency_hiding[%d]:%d",
+ i, bw_fixed_to_int(data->cursor_latency_hiding[i]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_blank_dram_speed_change_margin[%d]:%d",
+ i, bw_fixed_to_int(data->v_blank_dram_speed_change_margin[i]));
+ }
+
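+	/* Dump every entry of the per-surface [3][8] line-source transfer-time tables. */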
+ for (i = 0; i < maximum_number_of_surfaces; i++) {
+ for (j = 0; j < 3; j++) {
+ for (k = 0; k < 8; k++) {
+
+ DC_LOG_BANDWIDTH_CALCS("\n [bw_fixed] line_source_transfer_time[%d][%d][%d]:%d",
+ i, j, k, bw_fixed_to_int(data->line_source_transfer_time[i][j][k]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_speed_change_line_source_transfer_time[%d][%d][%d]:%d",
+ i, j, k,
+ bw_fixed_to_int(data->dram_speed_change_line_source_transfer_time[i][j][k]));
+ }
+ }
+ }
+
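+	/* 3-by-8 per-clock tables (yclk level by sclk level). */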
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < 8; j++) {
+
+ DC_LOG_BANDWIDTH_CALCS("\n [uint32_t] num_displays_with_margin[%d][%d]:%d",
+ i, j, data->num_displays_with_margin[i][j]);
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_burst_time[%d][%d]:%d",
+ i, j, bw_fixed_to_int(data->dmif_burst_time[i][j]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_burst_time[%d][%d]:%d",
+ i, j, bw_fixed_to_int(data->mcifwr_burst_time[i][j]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_dram_speed_change_margin[%d][%d]:%d",
+ i, j, bw_fixed_to_int(data->min_dram_speed_change_margin[i][j]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_dram_speed_change[%d][%d]:%d",
+ i, j, bw_fixed_to_int(data->dispclk_required_for_dram_speed_change[i][j]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_duration_margin[%d][%d]:%d",
+ i, j, bw_fixed_to_int(data->blackout_duration_margin[i][j]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_blackout_duration[%d][%d]:%d",
+ i, j, bw_fixed_to_int(data->dispclk_required_for_blackout_duration[i][j]));
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_blackout_recovery[%d][%d]:%d",
+ i, j, bw_fixed_to_int(data->dispclk_required_for_blackout_recovery[i][j]));
+ }
+ }
+
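+	/* Required-sclk values for meeting urgent latency, one entry per level. */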
+ for (i = 0; i < 6; i++) {
+ DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_required_sclk_for_urgent_latency[%d]:%d",
+ i, bw_fixed_to_int(data->dmif_required_sclk_for_urgent_latency[i]));
+ }
+}
+
+#endif /* _CALCS_CALCS_LOGGER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 0cbab81..4ee3c26 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -28,6 +28,7 @@
#include "dc.h"
#include "core_types.h"
#include "dal_asic_id.h"
+#include "calcs_logger.h"
/*
* NOTE:
@@ -52,11 +53,16 @@ static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asi
return BW_CALCS_VERSION_CARRIZO;
case FAMILY_VI:
+ if (ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_POLARIS12;
if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev))
return BW_CALCS_VERSION_POLARIS10;
- if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
- ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev))
+ if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev))
return BW_CALCS_VERSION_POLARIS11;
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_VEGAM;
+#endif
return BW_CALCS_VERSION_INVALID;
case FAMILY_AI:
@@ -2145,6 +2151,11 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/
break;
case BW_CALCS_VERSION_POLARIS10:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+	/* TODO: Treat VEGAM the same as P10 for now;
+	 * the parameters may need to be tuned for VEGAM. */
+ case BW_CALCS_VERSION_VEGAM:
+#endif
vbios.memory_type = bw_def_gddr5;
vbios.dram_channel_width_in_bits = 32;
vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
@@ -2373,6 +2384,122 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
break;
+ case BW_CALCS_VERSION_POLARIS12:
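+		/* Polaris 12: GDDR5 with 32-bit DRAM channels; clock values below are fixed-point MHz. */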
+ vbios.memory_type = bw_def_gddr5;
+ vbios.dram_channel_width_in_bits = 32;
+ vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+ vbios.number_of_dram_banks = 8;
+ vbios.high_yclk = bw_int_to_fixed(6000);
+ vbios.mid_yclk = bw_int_to_fixed(3200);
+ vbios.low_yclk = bw_int_to_fixed(1000);
+ vbios.low_sclk = bw_int_to_fixed(678);
+ vbios.mid1_sclk = bw_int_to_fixed(864);
+ vbios.mid2_sclk = bw_int_to_fixed(900);
+ vbios.mid3_sclk = bw_int_to_fixed(920);
+ vbios.mid4_sclk = bw_int_to_fixed(940);
+ vbios.mid5_sclk = bw_int_to_fixed(960);
+ vbios.mid6_sclk = bw_int_to_fixed(980);
+ vbios.high_sclk = bw_int_to_fixed(1049);
+ vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
+ vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
+ vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
+ vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.data_return_bus_width = bw_int_to_fixed(32);
+ vbios.trc = bw_int_to_fixed(48);
+		if (vbios.number_of_dram_channels == 2) /* 64-bit */
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+ else
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
+ vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+ vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+ vbios.nbp_state_change_latency = bw_int_to_fixed(250);
+ vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios.scatter_gather_enable = false;
+ vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios.cursor_width = 32;
+ vbios.average_compression_rate = 4;
+ vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+ vbios.blackout_duration = bw_int_to_fixed(0); /* us */
+ vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+ dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+ dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+ dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+ dceip.large_cursor = false;
+ dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+ dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+ dceip.cursor_max_outstanding_group_num = 1;
+ dceip.lines_interleaved_into_lb = 2;
+ dceip.chunk_width = 256;
+ dceip.number_of_graphics_pipes = 5;
+ dceip.number_of_underlay_pipes = 0;
+ dceip.low_power_tiling_mode = 0;
+ dceip.display_write_back_supported = true;
+ dceip.argb_compression_support = true;
+ dceip.underlay_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35556, 10000);
+ dceip.underlay_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.underlay_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.underlay_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.graphics_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35, 10);
+ dceip.graphics_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.graphics_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.graphics_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip.max_dmif_buffer_allocated = 4;
+ dceip.graphics_dmif_size = 12288;
+ dceip.underlay_luma_dmif_size = 19456;
+ dceip.underlay_chroma_dmif_size = 23552;
+ dceip.pre_downscaler_enabled = true;
+ dceip.underlay_downscale_prefetch_enabled = true;
+ dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
+ dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
+ dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ bw_int_to_fixed(1);
+ dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.underlay420_chroma_lb_size_per_component =
+ bw_int_to_fixed(164352);
+ dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.cursor_chunk_width = bw_int_to_fixed(64);
+ dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip.underlay_maximum_width_efficient_for_tiling =
+ bw_int_to_fixed(1920);
+ dceip.underlay_maximum_height_efficient_for_tiling =
+ bw_int_to_fixed(1080);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ bw_frc_to_fixed(3, 10);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ bw_int_to_fixed(25);
+ dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ 2);
+ dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ bw_int_to_fixed(128);
+ dceip.limit_excessive_outstanding_dmif_requests = true;
+ dceip.linear_mode_line_request_alternation_slice =
+ bw_int_to_fixed(64);
+ dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ 32;
+ dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip.dispclk_per_request = bw_int_to_fixed(2);
+ dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+ break;
case BW_CALCS_VERSION_STONEY:
vbios.memory_type = bw_def_gddr5;
vbios.dram_channel_width_in_bits = 64;
@@ -2815,6 +2942,19 @@ static void populate_initial_data(
data->bytes_per_pixel[num_displays + 4] = 4;
break;
}
+ } else if (pipe[i].stream->dst.width != 0 &&
+ pipe[i].stream->dst.height != 0 &&
+ pipe[i].stream->src.width != 0 &&
+ pipe[i].stream->src.height != 0) {
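+			/* Derive the scaling parameters from the stream src/dst rects when both are valid. */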
+ data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->src.width);
+ data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
+ data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->src.height);
+ data->h_taps[num_displays + 4] = pipe[i].stream->src.width == pipe[i].stream->dst.width ? bw_int_to_fixed(1) : bw_int_to_fixed(2);
+ data->v_taps[num_displays + 4] = pipe[i].stream->src.height == pipe[i].stream->dst.height ? bw_int_to_fixed(1) : bw_int_to_fixed(2);
+ data->h_scale_ratio[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->src.width, pipe[i].stream->dst.width);
+ data->v_scale_ratio[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->src.height, pipe[i].stream->dst.height);
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
+ data->bytes_per_pixel[num_displays + 4] = 4;
} else {
data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_addressable);
data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
@@ -2873,6 +3013,11 @@ bool bw_calcs(struct dc_context *ctx,
struct bw_fixed mid_yclk = vbios->mid_yclk;
struct bw_fixed low_yclk = vbios->low_yclk;
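+	/* Optionally trace every input of the bandwidth calculation before running it. */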
+ if (ctx->dc->debug.bandwidth_calcs_trace) {
+ print_bw_calcs_dceip(ctx->logger, dceip);
+ print_bw_calcs_vbios(ctx->logger, vbios);
+ print_bw_calcs_data(ctx->logger, data);
+ }
calculate_bandwidth(dceip, vbios, data);
yclk_lvl = data->y_clk_level;
@@ -2968,7 +3113,33 @@ bool bw_calcs(struct dc_context *ctx,
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[9], bw_int_to_fixed(1000)));
-
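+	/* Stutter-entry watermarks use the same surface mapping as the
+	 * stutter-exit watermarks above: slots 0-2 come from surfaces 4-6,
+	 * slots 3-4 from surfaces 0-1 when slave planes exist (7-8 otherwise),
+	 * and slot 5 from surface 9. */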
+ calcs_output->stutter_entry_wm_ns[0].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[1].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[2].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_entry_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_entry_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_entry_wm_ns[5].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[9], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[0].a_mark =
bw_fixed_to_int(bw_mul(data->
@@ -3063,7 +3234,33 @@ bool bw_calcs(struct dc_context *ctx,
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[9], bw_int_to_fixed(1000)));
-
+ calcs_output->stutter_entry_wm_ns[0].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[1].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[2].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_entry_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_entry_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_entry_wm_ns[5].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[9], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[0].b_mark =
bw_fixed_to_int(bw_mul(data->
@@ -3156,6 +3353,34 @@ bool bw_calcs(struct dc_context *ctx,
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[0].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[1].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[2].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_entry_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_entry_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_entry_wm_ns[5].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[9], bw_int_to_fixed(1000)));
+
calcs_output->urgent_wm_ns[0].c_mark =
bw_fixed_to_int(bw_mul(data->
urgent_watermark[4], bw_int_to_fixed(1000)));
@@ -3260,6 +3485,33 @@ bool bw_calcs(struct dc_context *ctx,
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[0].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[1].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[2].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_entry_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_entry_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_entry_wm_ns[5].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[9], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[0].d_mark =
bw_fixed_to_int(bw_mul(data->
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 4bb43a3..a102c19 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1459,39 +1459,39 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
void dcn_bw_sync_calcs_and_dml(struct dc *dc)
{
kernel_fpu_begin();
- DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %d ns\n"
- "sr_enter_plus_exit_time: %d ns\n"
- "urgent_latency: %d ns\n"
- "write_back_latency: %d ns\n"
- "percent_of_ideal_drambw_received_after_urg_latency: %d %\n"
+ DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n"
+ "sr_enter_plus_exit_time: %f ns\n"
+ "urgent_latency: %f ns\n"
+ "write_back_latency: %f ns\n"
+ "percent_of_ideal_drambw_received_after_urg_latency: %f %%\n"
"max_request_size: %d bytes\n"
- "dcfclkv_max0p9: %d kHz\n"
- "dcfclkv_nom0p8: %d kHz\n"
- "dcfclkv_mid0p72: %d kHz\n"
- "dcfclkv_min0p65: %d kHz\n"
- "max_dispclk_vmax0p9: %d kHz\n"
- "max_dispclk_vnom0p8: %d kHz\n"
- "max_dispclk_vmid0p72: %d kHz\n"
- "max_dispclk_vmin0p65: %d kHz\n"
- "max_dppclk_vmax0p9: %d kHz\n"
- "max_dppclk_vnom0p8: %d kHz\n"
- "max_dppclk_vmid0p72: %d kHz\n"
- "max_dppclk_vmin0p65: %d kHz\n"
- "socclk: %d kHz\n"
- "fabric_and_dram_bandwidth_vmax0p9: %d MB/s\n"
- "fabric_and_dram_bandwidth_vnom0p8: %d MB/s\n"
- "fabric_and_dram_bandwidth_vmid0p72: %d MB/s\n"
- "fabric_and_dram_bandwidth_vmin0p65: %d MB/s\n"
- "phyclkv_max0p9: %d kHz\n"
- "phyclkv_nom0p8: %d kHz\n"
- "phyclkv_mid0p72: %d kHz\n"
- "phyclkv_min0p65: %d kHz\n"
- "downspreading: %d %\n"
+ "dcfclkv_max0p9: %f kHz\n"
+ "dcfclkv_nom0p8: %f kHz\n"
+ "dcfclkv_mid0p72: %f kHz\n"
+ "dcfclkv_min0p65: %f kHz\n"
+ "max_dispclk_vmax0p9: %f kHz\n"
+ "max_dispclk_vnom0p8: %f kHz\n"
+ "max_dispclk_vmid0p72: %f kHz\n"
+ "max_dispclk_vmin0p65: %f kHz\n"
+ "max_dppclk_vmax0p9: %f kHz\n"
+ "max_dppclk_vnom0p8: %f kHz\n"
+ "max_dppclk_vmid0p72: %f kHz\n"
+ "max_dppclk_vmin0p65: %f kHz\n"
+ "socclk: %f kHz\n"
+ "fabric_and_dram_bandwidth_vmax0p9: %f MB/s\n"
+ "fabric_and_dram_bandwidth_vnom0p8: %f MB/s\n"
+ "fabric_and_dram_bandwidth_vmid0p72: %f MB/s\n"
+ "fabric_and_dram_bandwidth_vmin0p65: %f MB/s\n"
+ "phyclkv_max0p9: %f kHz\n"
+ "phyclkv_nom0p8: %f kHz\n"
+ "phyclkv_mid0p72: %f kHz\n"
+ "phyclkv_min0p65: %f kHz\n"
+ "downspreading: %f %%\n"
"round_trip_ping_latency_cycles: %d DCFCLK Cycles\n"
"urgent_out_of_order_return_per_channel: %d Bytes\n"
"number_of_channels: %d\n"
"vmm_page_size: %d Bytes\n"
- "dram_clock_change_latency: %d ns\n"
+ "dram_clock_change_latency: %f ns\n"
"return_bus_width: %d Bytes\n",
dc->dcn_soc->sr_exit_time * 1000,
dc->dcn_soc->sr_enter_plus_exit_time * 1000,
@@ -1527,11 +1527,11 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
dc->dcn_soc->vmm_page_size,
dc->dcn_soc->dram_clock_change_latency * 1000,
dc->dcn_soc->return_bus_width);
- DC_LOG_BANDWIDTH_CALCS("rob_buffer_size_in_kbyte: %d\n"
- "det_buffer_size_in_kbyte: %d\n"
- "dpp_output_buffer_pixels: %d\n"
- "opp_output_buffer_lines: %d\n"
- "pixel_chunk_size_in_kbyte: %d\n"
+ DC_LOG_BANDWIDTH_CALCS("rob_buffer_size_in_kbyte: %f\n"
+ "det_buffer_size_in_kbyte: %f\n"
+ "dpp_output_buffer_pixels: %f\n"
+ "opp_output_buffer_lines: %f\n"
+ "pixel_chunk_size_in_kbyte: %f\n"
"pte_enable: %d\n"
"pte_chunk_size: %d kbytes\n"
"meta_chunk_size: %d kbytes\n"
@@ -1550,13 +1550,13 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
"max_pscl_tolb_throughput: %d pixels/dppclk\n"
"max_lb_tovscl_throughput: %d pixels/dppclk\n"
"max_vscl_tohscl_throughput: %d pixels/dppclk\n"
- "max_hscl_ratio: %d\n"
- "max_vscl_ratio: %d\n"
+ "max_hscl_ratio: %f\n"
+ "max_vscl_ratio: %f\n"
"max_hscl_taps: %d\n"
"max_vscl_taps: %d\n"
"pte_buffer_size_in_requests: %d\n"
- "dispclk_ramping_margin: %d %\n"
- "under_scan_factor: %d %\n"
+ "dispclk_ramping_margin: %f %%\n"
+ "under_scan_factor: %f %%\n"
"max_inter_dcn_tile_repeaters: %d\n"
"can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one: %d\n"
"bug_forcing_luma_and_chroma_request_to_same_size_fixed: %d\n"
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 9cd3566..644b2187 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -936,95 +936,6 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
return true;
}
-/*
- * TODO this whole function needs to go
- *
- * dc_surface_update is needlessly complex. See if we can just replace this
- * with a dc_plane_state and follow the atomic model a bit more closely here.
- */
-bool dc_commit_planes_to_stream(
- struct dc *dc,
- struct dc_plane_state **plane_states,
- uint8_t new_plane_count,
- struct dc_stream_state *dc_stream,
- struct dc_state *state)
-{
- /* no need to dynamically allocate this. it's pretty small */
- struct dc_surface_update updates[MAX_SURFACES];
- struct dc_flip_addrs *flip_addr;
- struct dc_plane_info *plane_info;
- struct dc_scaling_info *scaling_info;
- int i;
- struct dc_stream_update *stream_update =
- kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
-
- if (!stream_update) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
- GFP_KERNEL);
- plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
- GFP_KERNEL);
- scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
- GFP_KERNEL);
-
- if (!flip_addr || !plane_info || !scaling_info) {
- kfree(flip_addr);
- kfree(plane_info);
- kfree(scaling_info);
- kfree(stream_update);
- return false;
- }
-
- memset(updates, 0, sizeof(updates));
-
- stream_update->src = dc_stream->src;
- stream_update->dst = dc_stream->dst;
- stream_update->out_transfer_func = dc_stream->out_transfer_func;
-
- for (i = 0; i < new_plane_count; i++) {
- updates[i].surface = plane_states[i];
- updates[i].gamma =
- (struct dc_gamma *)plane_states[i]->gamma_correction;
- updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
- flip_addr[i].address = plane_states[i]->address;
- flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
- plane_info[i].color_space = plane_states[i]->color_space;
- plane_info[i].input_tf = plane_states[i]->input_tf;
- plane_info[i].format = plane_states[i]->format;
- plane_info[i].plane_size = plane_states[i]->plane_size;
- plane_info[i].rotation = plane_states[i]->rotation;
- plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
- plane_info[i].stereo_format = plane_states[i]->stereo_format;
- plane_info[i].tiling_info = plane_states[i]->tiling_info;
- plane_info[i].visible = plane_states[i]->visible;
- plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
- plane_info[i].dcc = plane_states[i]->dcc;
- scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
- scaling_info[i].src_rect = plane_states[i]->src_rect;
- scaling_info[i].dst_rect = plane_states[i]->dst_rect;
- scaling_info[i].clip_rect = plane_states[i]->clip_rect;
-
- updates[i].flip_addr = &flip_addr[i];
- updates[i].plane_info = &plane_info[i];
- updates[i].scaling_info = &scaling_info[i];
- }
-
- dc_commit_updates_for_stream(
- dc,
- updates,
- new_plane_count,
- dc_stream, stream_update, plane_states, state);
-
- kfree(flip_addr);
- kfree(plane_info);
- kfree(scaling_info);
- kfree(stream_update);
- return true;
-}
-
struct dc_state *dc_create_state(void)
{
struct dc_state *context = kzalloc(sizeof(struct dc_state),
@@ -1107,9 +1018,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
if (u->plane_info->color_space != u->surface->color_space)
update_flags->bits.color_space_change = 1;
- if (u->plane_info->input_tf != u->surface->input_tf)
- update_flags->bits.input_tf_change = 1;
-
if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
update_flags->bits.horizontal_mirror_change = 1;
@@ -1243,12 +1151,20 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
if (u->input_csc_color_matrix)
update_flags->bits.input_csc_change = 1;
- if (update_flags->bits.in_transfer_func_change
- || update_flags->bits.input_csc_change) {
+ if (u->coeff_reduction_factor)
+ update_flags->bits.coeff_reduction_change = 1;
+
+ if (update_flags->bits.in_transfer_func_change) {
type = UPDATE_TYPE_MED;
elevate_update_type(&overall_type, type);
}
+ if (update_flags->bits.input_csc_change
+ || update_flags->bits.coeff_reduction_change) {
+ type = UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, type);
+ }
+
return overall_type;
}
@@ -1297,7 +1213,7 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
if (type == UPDATE_TYPE_FULL)
for (i = 0; i < surface_count; i++)
- updates[i].surface->update_flags.bits.full_update = 1;
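+			/* A full update invalidates everything: raise every update flag, not just full_update. */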
+ updates[i].surface->update_flags.raw = 0xFFFFFFFF;
return type;
}
@@ -1375,6 +1291,12 @@ static void commit_planes_for_stream(struct dc *dc,
pipe_ctx->stream_res.abm->funcs->set_abm_level(
pipe_ctx->stream_res.abm, stream->abm_level);
}
+
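+		/* Reprogram the vline interrupt whenever the periodic vsync delta changes, if the timing generator supports it. */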
+ if (stream_update && stream_update->periodic_fn_vsync_delta &&
+ pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
+ pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
+ pipe_ctx->stream->periodic_fn_vsync_delta);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 5a552cb3..267c767 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -36,8 +36,9 @@
#include "hw_sequencer.h"
#include "resource.h"
-#define DC_LOGGER \
- logger
+
+#define DC_LOGGER_INIT(logger)
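+/* Intentionally empty here; the macro keeps call sites consistent with files where it captures the logger. */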
+
#define SURFACE_TRACE(...) do {\
if (dc->debug.surface_trace) \
@@ -60,8 +61,7 @@ void pre_surface_trace(
int surface_count)
{
int i;
- struct dc *core_dc = dc;
- struct dal_logger *logger = core_dc->ctx->logger;
+ DC_LOGGER_INIT(dc->ctx->logger);
for (i = 0; i < surface_count; i++) {
const struct dc_plane_state *plane_state = plane_states[i];
@@ -72,8 +72,8 @@ void pre_surface_trace(
"plane_state->visible = %d;\n"
"plane_state->flip_immediate = %d;\n"
"plane_state->address.type = %d;\n"
- "plane_state->address.grph.addr.quad_part = 0x%X;\n"
- "plane_state->address.grph.meta_addr.quad_part = 0x%X;\n"
+ "plane_state->address.grph.addr.quad_part = 0x%llX;\n"
+ "plane_state->address.grph.meta_addr.quad_part = 0x%llX;\n"
"plane_state->scaling_quality.h_taps = %d;\n"
"plane_state->scaling_quality.v_taps = %d;\n"
"plane_state->scaling_quality.h_taps_c = %d;\n"
@@ -155,7 +155,6 @@ void pre_surface_trace(
"plane_state->tiling_info.gfx8.pipe_config = %d;\n"
"plane_state->tiling_info.gfx8.array_mode = %d;\n"
"plane_state->color_space = %d;\n"
- "plane_state->input_tf = %d;\n"
"plane_state->dcc.enable = %d;\n"
"plane_state->format = %d;\n"
"plane_state->rotation = %d;\n"
@@ -163,7 +162,6 @@ void pre_surface_trace(
plane_state->tiling_info.gfx8.pipe_config,
plane_state->tiling_info.gfx8.array_mode,
plane_state->color_space,
- plane_state->input_tf,
plane_state->dcc.enable,
plane_state->format,
plane_state->rotation,
@@ -183,8 +181,7 @@ void update_surface_trace(
int surface_count)
{
int i;
- struct dc *core_dc = dc;
- struct dal_logger *logger = core_dc->ctx->logger;
+ DC_LOGGER_INIT(dc->ctx->logger);
for (i = 0; i < surface_count; i++) {
const struct dc_surface_update *update = &updates[i];
@@ -192,8 +189,8 @@ void update_surface_trace(
SURFACE_TRACE("Update %d\n", i);
if (update->flip_addr) {
SURFACE_TRACE("flip_addr->address.type = %d;\n"
- "flip_addr->address.grph.addr.quad_part = 0x%X;\n"
- "flip_addr->address.grph.meta_addr.quad_part = 0x%X;\n"
+ "flip_addr->address.grph.addr.quad_part = 0x%llX;\n"
+ "flip_addr->address.grph.meta_addr.quad_part = 0x%llX;\n"
"flip_addr->flip_immediate = %d;\n",
update->flip_addr->address.type,
update->flip_addr->address.grph.addr.quad_part,
@@ -204,16 +201,15 @@ void update_surface_trace(
if (update->plane_info) {
SURFACE_TRACE(
"plane_info->color_space = %d;\n"
- "plane_info->input_tf = %d;\n"
"plane_info->format = %d;\n"
"plane_info->plane_size.grph.surface_pitch = %d;\n"
"plane_info->plane_size.grph.surface_size.height = %d;\n"
"plane_info->plane_size.grph.surface_size.width = %d;\n"
"plane_info->plane_size.grph.surface_size.x = %d;\n"
"plane_info->plane_size.grph.surface_size.y = %d;\n"
- "plane_info->rotation = %d;\n",
+ "plane_info->rotation = %d;\n"
+ "plane_info->stereo_format = %d;\n",
update->plane_info->color_space,
- update->plane_info->input_tf,
update->plane_info->format,
update->plane_info->plane_size.grph.surface_pitch,
update->plane_info->plane_size.grph.surface_size.height,
@@ -303,8 +299,7 @@ void update_surface_trace(
void post_surface_trace(struct dc *dc)
{
- struct dc *core_dc = dc;
- struct dal_logger *logger = core_dc->ctx->logger;
+ DC_LOGGER_INIT(dc->ctx->logger);
SURFACE_TRACE("post surface process.\n");
@@ -316,10 +311,10 @@ void context_timing_trace(
{
int i;
struct dc *core_dc = dc;
- struct dal_logger *logger = core_dc->ctx->logger;
int h_pos[MAX_PIPES], v_pos[MAX_PIPES];
struct crtc_position position;
unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
+ DC_LOGGER_INIT(dc->ctx->logger);
for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
@@ -354,9 +349,7 @@ void context_clock_trace(
struct dc_state *context)
{
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
- struct dc *core_dc = dc;
- struct dal_logger *logger = core_dc->ctx->logger;
-
+ DC_LOGGER_INIT(dc->ctx->logger);
CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
context->bw.dcn.calc_clk.dispclk_khz,
@@ -371,6 +364,7 @@ void context_clock_trace(
context->bw.dcn.calc_clk.dppclk_khz,
context->bw.dcn.calc_clk.dcfclk_khz,
context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
- context->bw.dcn.calc_clk.fclk_khz);
+ context->bw.dcn.calc_clk.fclk_khz,
+ context->bw.dcn.calc_clk.socclk_khz);
#endif
}
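
The fix above supplies the socclk_khz argument that the CLOCK_TRACE format
string already listed, so the argument count matches the %d specifiers again.
A sketch of how the kernel's __printf() annotation surfaces this class of bug
at compile time (names here are illustrative):

	/* With the __printf(fmt_idx, args_idx) attribute, GCC's -Wformat
	 * flags specifier/argument mismatches in printk-style helpers. */
	__printf(1, 2) void clock_trace(const char *fmt, ...);

	void example(int dppclk_khz, int socclk_khz)
	{
		/* warning: format expects 2 arguments, only 1 given */
		clock_trace("dppclk:%d socclk:%d\n", dppclk_khz);
	}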
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index ebc96b7..83d1215 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -208,6 +208,7 @@ void color_space_to_black_color(
case COLOR_SPACE_YCBCR709:
case COLOR_SPACE_YCBCR601_LIMITED:
case COLOR_SPACE_YCBCR709_LIMITED:
+ case COLOR_SPACE_2020_YCBCR:
*black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_CV];
break;
@@ -216,7 +217,25 @@ void color_space_to_black_color(
black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED];
break;
- default:
+ /**
+	 * Remove the default case and add a case for every color space,
+	 * so the compiler warns when a new color space is added but
+	 * not handled here
+ */
+ case COLOR_SPACE_UNKNOWN:
+ case COLOR_SPACE_SRGB:
+ case COLOR_SPACE_XR_RGB:
+ case COLOR_SPACE_MSREF_SCRGB:
+ case COLOR_SPACE_XV_YCC_709:
+ case COLOR_SPACE_XV_YCC_601:
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
+ case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ case COLOR_SPACE_ADOBERGB:
+ case COLOR_SPACE_DCIP3:
+ case COLOR_SPACE_DISPLAYNATIVE:
+ case COLOR_SPACE_DOLBYVISION:
+ case COLOR_SPACE_APPCTRL:
+ case COLOR_SPACE_CUSTOMPOINTS:
	/* default is sRGB black (full range). */
*black_color =
black_color_format[BLACK_COLOR_FORMAT_RGB_FULLRANGE];
@@ -230,6 +249,9 @@ bool hwss_wait_for_blank_complete(
{
int counter;
+	/* Not applicable if the pipe is not primary; skipping saves 300ms of boot time */
+ if (!tg->funcs->is_blanked)
+ return true;
for (counter = 0; counter < 100; counter++) {
if (tg->funcs->is_blanked(tg))
break;
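
The early return added above skips the whole wait when the timing generator
does not expose is_blanked, rather than polling a hook that cannot succeed. A
self-contained sketch of the bounded-poll shape used here, assuming a
per-iteration delay helper:

	/* Poll a condition up to 'tries' times; treat a missing probe as
	 * already satisfied, matching the early return above. */
	static bool poll_blanked(struct timing_generator *tg, int tries)
	{
		int i;

		if (!tg->funcs->is_blanked)
			return true;
		for (i = 0; i < tries; i++) {
			if (tg->funcs->is_blanked(tg))
				return true;
			udelay(100);	/* assumed delay; the real loop's
					 * interval is not shown in this hunk */
		}
		return false;
	}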
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 6d1c498..b44cf52 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -45,8 +45,9 @@
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_enum.h"
#include "dce/dce_11_0_sh_mask.h"
-#define DC_LOGGER \
- dc_ctx->logger
+
+#define DC_LOGGER_INIT(logger)
+
#define LINK_INFO(...) \
DC_LOG_HW_HOTPLUG( \
@@ -561,7 +562,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
struct dc_context *dc_ctx = link->ctx;
struct dc_sink *sink = NULL;
enum dc_connection_type new_connection_type = dc_connection_none;
-
+ DC_LOGGER_INIT(link->ctx->logger);
if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
return false;
@@ -927,6 +928,7 @@ static bool construct(
struct integrated_info info = {{{ 0 }}};
struct dc_bios *bios = init_params->dc->ctx->dc_bios;
const struct dc_vbios_funcs *bp_funcs = bios->funcs;
+ DC_LOGGER_INIT(dc_ctx->logger);
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
@@ -1135,7 +1137,8 @@ static void dpcd_configure_panel_mode(
{
union dpcd_edp_config edp_config_set;
bool panel_mode_edp = false;
- struct dc_context *dc_ctx = link->ctx;
+ DC_LOGGER_INIT(link->ctx->logger);
+
memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
if (DP_PANEL_MODE_DEFAULT != panel_mode) {
@@ -1183,16 +1186,21 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
- union down_spread_ctrl downspread;
+ union down_spread_ctrl old_downspread;
+ union down_spread_ctrl new_downspread;
core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
- &downspread.raw, sizeof(downspread));
+ &old_downspread.raw, sizeof(old_downspread));
- downspread.bits.IGNORE_MSA_TIMING_PARAM =
+ new_downspread.raw = old_downspread.raw;
+
+ new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
(stream->ignore_msa_timing_param) ? 1 : 0;
- core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
- &downspread.raw, sizeof(downspread));
+ if (new_downspread.raw != old_downspread.raw) {
+ core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &new_downspread.raw, sizeof(new_downspread));
+ }
}
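
The rewritten helper keeps the DPCD read-modify-write but adds a no-op guard:
the AUX write is issued only when IGNORE_MSA_TIMING_PARAM actually changed. A
generic sketch of that pattern for a one-byte DPCD register, using the same
helpers this file already calls:

	uint8_t old_val, new_val;

	core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
			&old_val, sizeof(old_val));
	new_val = old_val;
	/* ... modify bits in new_val ... */
	if (new_val != old_val)	/* skip the AUX transaction when unchanged */
		core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
				&new_val, sizeof(new_val));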
static enum dc_status enable_link_dp(
@@ -1843,9 +1851,22 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
static bool dp_active_dongle_validate_timing(
const struct dc_crtc_timing *timing,
- const struct dc_dongle_caps *dongle_caps)
+ const struct dpcd_caps *dpcd_caps)
{
unsigned int required_pix_clk = timing->pix_clk_khz;
+ const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps;
+
+ switch (dpcd_caps->dongle_type) {
+ case DISPLAY_DONGLE_DP_VGA_CONVERTER:
+ case DISPLAY_DONGLE_DP_DVI_CONVERTER:
+ case DISPLAY_DONGLE_DP_DVI_DONGLE:
+ if (timing->pixel_encoding == PIXEL_ENCODING_RGB)
+ return true;
+ else
+ return false;
+ default:
+ break;
+ }
if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
dongle_caps->extendedCapValid == false)
@@ -1911,7 +1932,7 @@ enum dc_status dc_link_validate_mode_timing(
const struct dc_crtc_timing *timing)
{
uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
- struct dc_dongle_caps *dongle_caps = &link->dpcd_caps.dongle_caps;
+ struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
	/* A hack to avoid failing any modes for the EDID override feature on
	 * topology changes such as a lower-quality DP cable or a different dongle
@@ -1924,7 +1945,7 @@ enum dc_status dc_link_validate_mode_timing(
return DC_EXCEED_DONGLE_CAP;
/* Active Dongle*/
- if (!dp_active_dongle_validate_timing(timing, dongle_caps))
+ if (!dp_active_dongle_validate_timing(timing, dpcd_caps))
return DC_EXCEED_DONGLE_CAP;
switch (stream->signal) {
@@ -1950,10 +1971,10 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
struct dc *core_dc = link->ctx->dc;
struct abm *abm = core_dc->res_pool->abm;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
- struct dc_context *dc_ctx = link->ctx;
unsigned int controller_id = 0;
bool use_smooth_brightness = true;
int i;
+ DC_LOGGER_INIT(link->ctx->logger);
if ((dmcu == NULL) ||
(abm == NULL) ||
@@ -1961,7 +1982,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
return false;
if (stream) {
- if (stream->bl_pwm_level == 0)
+ if (stream->bl_pwm_level == EDP_BACKLIGHT_RAMP_DISABLE_LEVEL)
frame_ramp = 0;
((struct dc_stream_state *)stream)->bl_pwm_level = level;
@@ -2149,8 +2170,8 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct fixed31_32 avg_time_slots_per_mtp;
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
- struct dc_context *dc_ctx = link->ctx;
uint8_t i;
+ DC_LOGGER_INIT(link->ctx->logger);
	/* enable_link_dp_mst already checks link->enabled_stream_count
* and stream is in link->stream[]. This is called during set mode,
@@ -2178,11 +2199,11 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
link->mst_stream_alloc_table.stream_count);
for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
- DC_LOG_MST("stream_enc[%d]: 0x%x "
+ DC_LOG_MST("stream_enc[%d]: %p "
"stream[%d].vcp_id: %d "
"stream[%d].slot_count: %d\n",
i,
- link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
i,
link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
i,
@@ -2229,7 +2250,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0);
uint8_t i;
bool mst_mode = (link->type == dc_connection_mst_branch);
- struct dc_context *dc_ctx = link->ctx;
+ DC_LOGGER_INIT(link->ctx->logger);
	/* deallocate_mst_payload is called before the link is disabled. When
	 * the mode changes or a monitor is disabled/enabled, a new stream is
	 * created which is not in link
@@ -2268,11 +2289,11 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
link->mst_stream_alloc_table.stream_count);
for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
- DC_LOG_MST("stream_enc[%d]: 0x%x "
+ DC_LOG_MST("stream_enc[%d]: %p "
"stream[%d].vcp_id: %d "
"stream[%d].slot_count: %d\n",
i,
- link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
i,
link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
i,
@@ -2302,8 +2323,8 @@ void core_link_enable_stream(
struct pipe_ctx *pipe_ctx)
{
struct dc *core_dc = pipe_ctx->stream->ctx->dc;
- struct dc_context *dc_ctx = pipe_ctx->stream->ctx;
enum dc_status status;
+ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* eDP lit up by bios already, no need to enable again. */
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 49c2fac..ae48d60 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -629,13 +629,14 @@ bool dal_ddc_service_query_ddc_data(
return ret;
}
-ssize_t dal_ddc_service_read_dpcd_data(
+enum ddc_result dal_ddc_service_read_dpcd_data(
struct ddc_service *ddc,
bool i2c,
enum i2c_mot_mode mot,
uint32_t address,
uint8_t *data,
- uint32_t len)
+ uint32_t len,
+ uint32_t *read)
{
struct aux_payload read_payload = {
.i2c_over_aux = i2c,
@@ -652,6 +653,8 @@ ssize_t dal_ddc_service_read_dpcd_data(
.mot = mot
};
+ *read = 0;
+
if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
BREAK_TO_DEBUGGER();
return DDC_RESULT_FAILED_INVALID_OPERATION;
@@ -661,7 +664,8 @@ ssize_t dal_ddc_service_read_dpcd_data(
ddc->ctx->i2caux,
ddc->ddc_pin,
&command)) {
- return (ssize_t)command.payloads->length;
+ *read = command.payloads->length;
+ return DDC_RESULT_SUCESSFULL;
}
return DDC_RESULT_FAILED_OPERATION;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 3b50535..7d609c7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1378,8 +1378,8 @@ static uint32_t bandwidth_in_kbps_from_timing(
{
uint32_t bits_per_channel = 0;
uint32_t kbps;
- switch (timing->display_color_depth) {
+ switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
bits_per_channel = 6;
break;
@@ -1401,14 +1401,20 @@ static uint32_t bandwidth_in_kbps_from_timing(
default:
break;
}
+
ASSERT(bits_per_channel != 0);
kbps = timing->pix_clk_khz;
kbps *= bits_per_channel;
- if (timing->flags.Y_ONLY != 1)
+ if (timing->flags.Y_ONLY != 1) {
		/* Only the Y-Only format uses 1/3 the bandwidth of RGB */
kbps *= 3;
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ kbps /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ kbps = kbps * 2 / 3;
+ }
return kbps;
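
Worked numbers for the new chroma-subsampling factors, assuming an
illustrative 1080p timing with a 148,500 kHz pixel clock and 8 bits per
channel (not Y-only):

	/* RGB / YCbCr 4:4:4: 148500 * 8 * 3     = 3,564,000 kbps
	 * YCbCr 4:2:0:       3,564,000 / 2     = 1,782,000 kbps
	 * YCbCr 4:2:2:       3,564,000 * 2 / 3 = 2,376,000 kbps */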
@@ -2278,6 +2284,8 @@ static bool retrieve_link_cap(struct dc_link *link)
union edp_configuration_cap edp_config_cap;
union dp_downstream_port_present ds_port = { 0 };
enum dc_status status = DC_ERROR_UNEXPECTED;
+ uint32_t read_dpcd_retry_cnt = 3;
+ int i;
memset(dpcd_data, '\0', sizeof(dpcd_data));
memset(&down_strm_port_count,
@@ -2285,11 +2293,15 @@ static bool retrieve_link_cap(struct dc_link *link)
memset(&edp_config_cap, '\0',
sizeof(union edp_configuration_cap));
- status = core_link_read_dpcd(
- link,
- DP_DPCD_REV,
- dpcd_data,
- sizeof(dpcd_data));
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DPCD_REV,
+ dpcd_data,
+ sizeof(dpcd_data));
+ if (status == DC_OK)
+ break;
+ }
if (status != DC_OK) {
dm_error("%s: Read dpcd data failed.\n", __func__);
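
The loop above retries the capability read up to three times before giving up,
presumably because the first AUX transactions after a hotplug can fail
transiently. A sketch of the same idea as a reusable helper (hypothetical
name, same core_link_read_dpcd helper this file uses):

	/* Retry a DPCD read a fixed number of times, returning the last
	 * status so the caller can still report the failure. */
	static enum dc_status read_dpcd_retry(struct dc_link *link,
			uint32_t address, uint8_t *data, uint32_t size,
			int retries)
	{
		enum dc_status status = DC_ERROR_UNEXPECTED;

		while (retries--) {
			status = core_link_read_dpcd(link, address,
					data, size);
			if (status == DC_OK)
				break;
		}
		return status;
	}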
@@ -2376,6 +2388,10 @@ bool detect_dp_sink_caps(struct dc_link *link)
void detect_edp_sink_caps(struct dc_link *link)
{
retrieve_link_cap(link);
+
+ if (link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)
+ link->reported_link_cap.link_rate = LINK_RATE_HIGH2;
+
link->verified_link_cap = link->reported_link_cap;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ba3487e..9eb731f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -45,8 +45,9 @@
#include "dcn10/dcn10_resource.h"
#endif
#include "dce120/dce120_resource.h"
-#define DC_LOGGER \
- ctx->logger
+
+#define DC_LOGGER_INIT(logger)
+
enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
{
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
@@ -78,6 +79,10 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
dc_version = DCE_VERSION_11_2;
}
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev))
+ dc_version = DCE_VERSION_11_22;
+#endif
break;
case FAMILY_AI:
dc_version = DCE_VERSION_12_0;
@@ -124,6 +129,9 @@ struct resource_pool *dc_create_resource_pool(
num_virtual_links, dc, asic_id);
break;
case DCE_VERSION_11_2:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case DCE_VERSION_11_22:
+#endif
res_pool = dce112_create_resource_pool(
num_virtual_links, dc);
break;
@@ -835,7 +843,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
struct view recout_skip = { 0 };
bool res = false;
- struct dc_context *ctx = pipe_ctx->stream->ctx;
+ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Important: scaling ratio calculation requires pixel format,
* lb depth calculation requires recout and taps require scaling ratios.
* Inits require viewport, taps, ratios and recout of split pipe
@@ -843,6 +851,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
pipe_ctx->plane_state->format);
+ if (pipe_ctx->stream->timing.flags.INTERLACE)
+ pipe_ctx->stream->dst.height *= 2;
+
calculate_scaling_ratios(pipe_ctx);
calculate_viewport(pipe_ctx);
@@ -863,6 +874,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
+ if (pipe_ctx->stream->timing.flags.INTERLACE)
+ pipe_ctx->plane_res.scl_data.v_active *= 2;
/* Taps calculations */
@@ -908,6 +921,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
plane_state->dst_rect.x,
plane_state->dst_rect.y);
+ if (pipe_ctx->stream->timing.flags.INTERLACE)
+ pipe_ctx->stream->dst.height /= 2;
+
return res;
}
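
For interlaced timings the function now temporarily doubles the stream
destination height (and v_active) so the scaling math sees frame-height
values, then halves it back before returning. A condensed sketch of that
scale-around pattern (the interpretation of the doubling is an assumption,
not stated in the patch):

	if (pipe_ctx->stream->timing.flags.INTERLACE)
		pipe_ctx->stream->dst.height *= 2;	/* compute in frame space */
	calculate_scaling_ratios(pipe_ctx);
	/* ... viewport, recout, taps ... */
	if (pipe_ctx->stream->timing.flags.INTERLACE)
		pipe_ctx->stream->dst.height /= 2;	/* restore field-space value */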
@@ -1294,6 +1310,19 @@ bool dc_add_all_planes_for_stream(
}
+static bool is_hdr_static_meta_changed(struct dc_stream_state *cur_stream,
+ struct dc_stream_state *new_stream)
+{
+ if (cur_stream == NULL)
+ return true;
+
+ if (memcmp(&cur_stream->hdr_static_metadata,
+ &new_stream->hdr_static_metadata,
+ sizeof(struct dc_info_packet)) != 0)
+ return true;
+
+ return false;
+}
static bool is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream)
@@ -1329,6 +1358,9 @@ static bool are_stream_backends_same(
if (is_timing_changed(stream_a, stream_b))
return false;
+ if (is_hdr_static_meta_changed(stream_a, stream_b))
+ return false;
+
return true;
}
@@ -1599,18 +1631,6 @@ enum dc_status dc_remove_stream_from_ctx(
return DC_OK;
}
-static void copy_pipe_ctx(
- const struct pipe_ctx *from_pipe_ctx, struct pipe_ctx *to_pipe_ctx)
-{
- struct dc_plane_state *plane_state = to_pipe_ctx->plane_state;
- struct dc_stream_state *stream = to_pipe_ctx->stream;
-
- *to_pipe_ctx = *from_pipe_ctx;
- to_pipe_ctx->stream = stream;
- if (plane_state != NULL)
- to_pipe_ctx->plane_state = plane_state;
-}
-
static struct dc_stream_state *find_pll_sharable_stream(
struct dc_stream_state *stream_needs_pll,
struct dc_state *context)
@@ -1703,7 +1723,7 @@ enum dc_status resource_map_pool_resources(
pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
#endif
- if (pipe_idx < 0)
+ if (pipe_idx < 0 || context->res_ctx.pipe_ctx[pipe_idx].stream_res.tg == NULL)
return DC_NO_CONTROLLER_RESOURCE;
pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
@@ -1752,26 +1772,6 @@ enum dc_status resource_map_pool_resources(
return DC_ERROR_UNEXPECTED;
}
-/* first stream in the context is used to populate the rest */
-void validate_guaranteed_copy_streams(
- struct dc_state *context,
- int max_streams)
-{
- int i;
-
- for (i = 1; i < max_streams; i++) {
- context->streams[i] = context->streams[0];
-
- copy_pipe_ctx(&context->res_ctx.pipe_ctx[0],
- &context->res_ctx.pipe_ctx[i]);
- context->res_ctx.pipe_ctx[i].stream =
- context->res_ctx.pipe_ctx[0].stream;
-
- dc_stream_retain(context->streams[i]);
- context->stream_count++;
- }
-}
-
void dc_resource_state_copy_construct_current(
const struct dc *dc,
struct dc_state *dst_ctx)
@@ -1798,9 +1798,9 @@ enum dc_status dc_validate_global_state(
return DC_ERROR_UNEXPECTED;
if (dc->res_pool->funcs->validate_global) {
- result = dc->res_pool->funcs->validate_global(dc, new_ctx);
- if (result != DC_OK)
- return result;
+ result = dc->res_pool->funcs->validate_global(dc, new_ctx);
+ if (result != DC_OK)
+ return result;
}
for (i = 0; i < new_ctx->stream_count; i++) {
@@ -1843,7 +1843,7 @@ enum dc_status dc_validate_global_state(
}
static void patch_gamut_packet_checksum(
- struct encoder_info_packet *gamut_packet)
+ struct dc_info_packet *gamut_packet)
{
/* For gamut we recalc checksum */
if (gamut_packet->valid) {
@@ -1862,12 +1862,11 @@ static void patch_gamut_packet_checksum(
}
static void set_avi_info_frame(
- struct encoder_info_packet *info_packet,
+ struct dc_info_packet *info_packet,
struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
- struct info_frame info_frame = { {0} };
uint32_t pixel_encoding = 0;
enum scanning_type scan_type = SCANNING_TYPE_NODATA;
enum dc_aspect_ratio aspect = ASPECT_RATIO_NO_DATA;
@@ -1877,22 +1876,24 @@ static void set_avi_info_frame(
unsigned int cn0_cn1_value = 0;
uint8_t *check_sum = NULL;
uint8_t byte_index = 0;
- union hdmi_info_packet *hdmi_info = &info_frame.avi_info_packet.info_packet_hdmi;
+ union hdmi_info_packet hdmi_info;
union display_content_support support = {0};
unsigned int vic = pipe_ctx->stream->timing.vic;
enum dc_timing_3d_format format;
+ memset(&hdmi_info, 0, sizeof(union hdmi_info_packet));
+
color_space = pipe_ctx->stream->output_color_space;
if (color_space == COLOR_SPACE_UNKNOWN)
color_space = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ?
COLOR_SPACE_SRGB:COLOR_SPACE_YCBCR709;
/* Initialize header */
- hdmi_info->bits.header.info_frame_type = HDMI_INFOFRAME_TYPE_AVI;
+ hdmi_info.bits.header.info_frame_type = HDMI_INFOFRAME_TYPE_AVI;
/* InfoFrameVersion_3 is defined by CEA861F (Section 6.4), but shall
* not be used in HDMI 2.0 (Section 10.1) */
- hdmi_info->bits.header.version = 2;
- hdmi_info->bits.header.length = HDMI_AVI_INFOFRAME_SIZE;
+ hdmi_info.bits.header.version = 2;
+ hdmi_info.bits.header.length = HDMI_AVI_INFOFRAME_SIZE;
/*
* IDO-defined (Y2,Y1,Y0 = 1,1,1) shall not be used by devices built
@@ -1918,39 +1919,39 @@ static void set_avi_info_frame(
/* Y0_Y1_Y2 : The pixel encoding */
/* H14b AVI InfoFrame has extension on Y-field from 2 bits to 3 bits */
- hdmi_info->bits.Y0_Y1_Y2 = pixel_encoding;
+ hdmi_info.bits.Y0_Y1_Y2 = pixel_encoding;
/* A0 = 1 Active Format Information valid */
- hdmi_info->bits.A0 = ACTIVE_FORMAT_VALID;
+ hdmi_info.bits.A0 = ACTIVE_FORMAT_VALID;
/* B0, B1 = 3; Bar info data is valid */
- hdmi_info->bits.B0_B1 = BAR_INFO_BOTH_VALID;
+ hdmi_info.bits.B0_B1 = BAR_INFO_BOTH_VALID;
- hdmi_info->bits.SC0_SC1 = PICTURE_SCALING_UNIFORM;
+ hdmi_info.bits.SC0_SC1 = PICTURE_SCALING_UNIFORM;
/* S0, S1 : Underscan / Overscan */
/* TODO: un-hardcode scan type */
scan_type = SCANNING_TYPE_UNDERSCAN;
- hdmi_info->bits.S0_S1 = scan_type;
+ hdmi_info.bits.S0_S1 = scan_type;
/* C0, C1 : Colorimetry */
if (color_space == COLOR_SPACE_YCBCR709 ||
color_space == COLOR_SPACE_YCBCR709_LIMITED)
- hdmi_info->bits.C0_C1 = COLORIMETRY_ITU709;
+ hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709;
else if (color_space == COLOR_SPACE_YCBCR601 ||
color_space == COLOR_SPACE_YCBCR601_LIMITED)
- hdmi_info->bits.C0_C1 = COLORIMETRY_ITU601;
+ hdmi_info.bits.C0_C1 = COLORIMETRY_ITU601;
else {
- hdmi_info->bits.C0_C1 = COLORIMETRY_NO_DATA;
+ hdmi_info.bits.C0_C1 = COLORIMETRY_NO_DATA;
}
if (color_space == COLOR_SPACE_2020_RGB_FULLRANGE ||
color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE ||
color_space == COLOR_SPACE_2020_YCBCR) {
- hdmi_info->bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR;
- hdmi_info->bits.C0_C1 = COLORIMETRY_EXTENDED;
+ hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR;
+ hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED;
} else if (color_space == COLOR_SPACE_ADOBERGB) {
- hdmi_info->bits.EC0_EC2 = COLORIMETRYEX_ADOBERGB;
- hdmi_info->bits.C0_C1 = COLORIMETRY_EXTENDED;
+ hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_ADOBERGB;
+ hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED;
}
/* TODO: un-hardcode aspect ratio */
@@ -1959,18 +1960,18 @@ static void set_avi_info_frame(
switch (aspect) {
case ASPECT_RATIO_4_3:
case ASPECT_RATIO_16_9:
- hdmi_info->bits.M0_M1 = aspect;
+ hdmi_info.bits.M0_M1 = aspect;
break;
case ASPECT_RATIO_NO_DATA:
case ASPECT_RATIO_64_27:
case ASPECT_RATIO_256_135:
default:
- hdmi_info->bits.M0_M1 = 0;
+ hdmi_info.bits.M0_M1 = 0;
}
/* Active Format Aspect ratio - same as Picture Aspect Ratio. */
- hdmi_info->bits.R0_R3 = ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE;
+ hdmi_info.bits.R0_R3 = ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE;
/* TODO: un-hardcode cn0_cn1 and itc */
@@ -2013,8 +2014,8 @@ static void set_avi_info_frame(
}
}
}
- hdmi_info->bits.CN0_CN1 = cn0_cn1_value;
- hdmi_info->bits.ITC = itc_value;
+ hdmi_info.bits.CN0_CN1 = cn0_cn1_value;
+ hdmi_info.bits.ITC = itc_value;
}
/* TODO : We should handle YCC quantization */
@@ -2023,19 +2024,19 @@ static void set_avi_info_frame(
stream->sink->edid_caps.qy_bit == 1) {
if (color_space == COLOR_SPACE_SRGB ||
color_space == COLOR_SPACE_2020_RGB_FULLRANGE) {
- hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
- hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE;
+ hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
+ hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE;
} else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) {
- hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE;
- hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+ hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE;
+ hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
} else {
- hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
- hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+ hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
+ hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
}
} else {
- hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
- hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+ hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
+ hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
}
///VIC
@@ -2060,51 +2061,49 @@ static void set_avi_info_frame(
break;
}
}
- hdmi_info->bits.VIC0_VIC7 = vic;
+ hdmi_info.bits.VIC0_VIC7 = vic;
/* pixel repetition
* PR0 - PR3 start from 0 whereas pHwPathMode->mode.timing.flags.pixel
* repetition start from 1 */
- hdmi_info->bits.PR0_PR3 = 0;
+ hdmi_info.bits.PR0_PR3 = 0;
/* Bar Info
* barTop: Line Number of End of Top Bar.
* barBottom: Line Number of Start of Bottom Bar.
* barLeft: Pixel Number of End of Left Bar.
* barRight: Pixel Number of Start of Right Bar. */
- hdmi_info->bits.bar_top = stream->timing.v_border_top;
- hdmi_info->bits.bar_bottom = (stream->timing.v_total
+ hdmi_info.bits.bar_top = stream->timing.v_border_top;
+ hdmi_info.bits.bar_bottom = (stream->timing.v_total
- stream->timing.v_border_bottom + 1);
- hdmi_info->bits.bar_left = stream->timing.h_border_left;
- hdmi_info->bits.bar_right = (stream->timing.h_total
+ hdmi_info.bits.bar_left = stream->timing.h_border_left;
+ hdmi_info.bits.bar_right = (stream->timing.h_total
- stream->timing.h_border_right + 1);
/* check_sum - Calculate AFMT_AVI_INFO0 ~ AFMT_AVI_INFO3 */
- check_sum = &info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.sb[0];
+ check_sum = &hdmi_info.packet_raw_data.sb[0];
*check_sum = HDMI_INFOFRAME_TYPE_AVI + HDMI_AVI_INFOFRAME_SIZE + 2;
for (byte_index = 1; byte_index <= HDMI_AVI_INFOFRAME_SIZE; byte_index++)
- *check_sum += hdmi_info->packet_raw_data.sb[byte_index];
+ *check_sum += hdmi_info.packet_raw_data.sb[byte_index];
/* one byte complement */
*check_sum = (uint8_t) (0x100 - *check_sum);
/* Store in hw_path_mode */
- info_packet->hb0 = hdmi_info->packet_raw_data.hb0;
- info_packet->hb1 = hdmi_info->packet_raw_data.hb1;
- info_packet->hb2 = hdmi_info->packet_raw_data.hb2;
+ info_packet->hb0 = hdmi_info.packet_raw_data.hb0;
+ info_packet->hb1 = hdmi_info.packet_raw_data.hb1;
+ info_packet->hb2 = hdmi_info.packet_raw_data.hb2;
- for (byte_index = 0; byte_index < sizeof(info_frame.avi_info_packet.
- info_packet_hdmi.packet_raw_data.sb); byte_index++)
- info_packet->sb[byte_index] = info_frame.avi_info_packet.
- info_packet_hdmi.packet_raw_data.sb[byte_index];
+ for (byte_index = 0; byte_index < sizeof(hdmi_info.packet_raw_data.sb); byte_index++)
+ info_packet->sb[byte_index] = hdmi_info.packet_raw_data.sb[byte_index];
info_packet->valid = true;
}
static void set_vendor_info_packet(
- struct encoder_info_packet *info_packet,
+ struct dc_info_packet *info_packet,
struct dc_stream_state *stream)
{
uint32_t length = 0;
@@ -2217,7 +2216,7 @@ static void set_vendor_info_packet(
}
static void set_spd_info_packet(
- struct encoder_info_packet *info_packet,
+ struct dc_info_packet *info_packet,
struct dc_stream_state *stream)
{
/* SPD info packet for FreeSync */
@@ -2338,104 +2337,19 @@ static void set_spd_info_packet(
}
static void set_hdr_static_info_packet(
- struct encoder_info_packet *info_packet,
+ struct dc_info_packet *info_packet,
struct dc_stream_state *stream)
{
- uint16_t i = 0;
- enum signal_type signal = stream->signal;
- uint32_t data;
+ /* HDR Static Metadata info packet for HDR10 */
- if (!stream->hdr_static_metadata.hdr_supported)
+ if (!stream->hdr_static_metadata.valid)
return;
- if (dc_is_hdmi_signal(signal)) {
- info_packet->valid = true;
-
- info_packet->hb0 = 0x87;
- info_packet->hb1 = 0x01;
- info_packet->hb2 = 0x1A;
- i = 1;
- } else if (dc_is_dp_signal(signal)) {
- info_packet->valid = true;
-
- info_packet->hb0 = 0x00;
- info_packet->hb1 = 0x87;
- info_packet->hb2 = 0x1D;
- info_packet->hb3 = (0x13 << 2);
- i = 2;
- }
-
- data = stream->hdr_static_metadata.is_hdr;
- info_packet->sb[i++] = data ? 0x02 : 0x00;
- info_packet->sb[i++] = 0x00;
-
- data = stream->hdr_static_metadata.chromaticity_green_x / 2;
- info_packet->sb[i++] = data & 0xFF;
- info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
- data = stream->hdr_static_metadata.chromaticity_green_y / 2;
- info_packet->sb[i++] = data & 0xFF;
- info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
- data = stream->hdr_static_metadata.chromaticity_blue_x / 2;
- info_packet->sb[i++] = data & 0xFF;
- info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
- data = stream->hdr_static_metadata.chromaticity_blue_y / 2;
- info_packet->sb[i++] = data & 0xFF;
- info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
- data = stream->hdr_static_metadata.chromaticity_red_x / 2;
- info_packet->sb[i++] = data & 0xFF;
- info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
- data = stream->hdr_static_metadata.chromaticity_red_y / 2;
- info_packet->sb[i++] = data & 0xFF;
- info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
- data = stream->hdr_static_metadata.chromaticity_white_point_x / 2;
- info_packet->sb[i++] = data & 0xFF;
- info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
- data = stream->hdr_static_metadata.chromaticity_white_point_y / 2;
- info_packet->sb[i++] = data & 0xFF;
- info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
- data = stream->hdr_static_metadata.max_luminance;
- info_packet->sb[i++] = data & 0xFF;
- info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
- data = stream->hdr_static_metadata.min_luminance;
- info_packet->sb[i++] = data & 0xFF;
- info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
- data = stream->hdr_static_metadata.maximum_content_light_level;
- info_packet->sb[i++] = data & 0xFF;
- info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
- data = stream->hdr_static_metadata.maximum_frame_average_light_level;
- info_packet->sb[i++] = data & 0xFF;
- info_packet->sb[i++] = (data & 0xFF00) >> 8;
-
- if (dc_is_hdmi_signal(signal)) {
- uint32_t checksum = 0;
-
- checksum += info_packet->hb0;
- checksum += info_packet->hb1;
- checksum += info_packet->hb2;
-
- for (i = 1; i <= info_packet->hb2; i++)
- checksum += info_packet->sb[i];
-
- info_packet->sb[0] = 0x100 - checksum;
- } else if (dc_is_dp_signal(signal)) {
- info_packet->sb[0] = 0x01;
- info_packet->sb[1] = 0x1A;
- }
+ *info_packet = stream->hdr_static_metadata;
}
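
With hdr_static_metadata now stored as a ready-made dc_info_packet on the
stream, the setter reduces to a validity check plus a struct copy; the packet
bytes are expected to be assembled upstream before the stream reaches DC. A
sketch of hand-building such a packet, reusing the HDMI header values from the
code removed above (payload omitted):

	struct dc_info_packet packet = {0};

	packet.hb0 = 0x87;	/* infoframe type: HDR static metadata */
	packet.hb1 = 0x01;	/* version */
	packet.hb2 = 0x1A;	/* payload length */
	/* packet.sb[] would carry the checksum and metadata descriptor */
	packet.valid = true;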
static void set_vsc_info_packet(
- struct encoder_info_packet *info_packet,
+ struct dc_info_packet *info_packet,
struct dc_stream_state *stream)
{
unsigned int vscPacketRevision = 0;
@@ -2650,6 +2564,8 @@ bool pipe_need_reprogram(
if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
return true;
+ if (is_hdr_static_meta_changed(pipe_ctx_old->stream, pipe_ctx->stream))
+ return true;
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index ce0747e..3732a1d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -101,14 +101,16 @@ static void construct(struct dc_stream_state *stream,
stream->status.link = stream->sink->link;
update_stream_signal(stream);
+
+ stream->out_transfer_func = dc_create_transfer_func();
+ stream->out_transfer_func->type = TF_TYPE_BYPASS;
}
static void destruct(struct dc_stream_state *stream)
{
dc_sink_release(stream->sink);
if (stream->out_transfer_func != NULL) {
- dc_transfer_func_release(
- stream->out_transfer_func);
+ dc_transfer_func_release(stream->out_transfer_func);
stream->out_transfer_func = NULL;
}
}
@@ -176,6 +178,7 @@ bool dc_stream_set_cursor_attributes(
int i;
struct dc *core_dc;
struct resource_context *res_ctx;
+ struct pipe_ctx *pipe_to_program = NULL;
if (NULL == stream) {
dm_error("DC: dc_stream is NULL!\n");
@@ -203,9 +206,17 @@ bool dc_stream_set_cursor_attributes(
if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
continue;
+ if (!pipe_to_program) {
+ pipe_to_program = pipe_ctx;
+ core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
+ }
core_dc->hwss.set_cursor_attribute(pipe_ctx);
}
+
+ if (pipe_to_program)
+ core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false);
+
return true;
}
@@ -216,6 +227,7 @@ bool dc_stream_set_cursor_position(
int i;
struct dc *core_dc;
struct resource_context *res_ctx;
+ struct pipe_ctx *pipe_to_program = NULL;
if (NULL == stream) {
dm_error("DC: dc_stream is NULL!\n");
@@ -241,9 +253,17 @@ bool dc_stream_set_cursor_position(
!pipe_ctx->plane_res.ipp)
continue;
+ if (!pipe_to_program) {
+ pipe_to_program = pipe_ctx;
+ core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
+ }
+
core_dc->hwss.set_cursor_position(pipe_ctx);
}
+ if (pipe_to_program)
+ core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false);
+
return true;
}
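
Both cursor paths now share the same shape: take the pipe control lock on the
first matching pipe, program every matching pipe, then release the lock, so a
cursor update cannot land mid-frame on one pipe but not another. A condensed
sketch (the pipe-matching filter is abbreviated):

	struct pipe_ctx *pipe_to_program = NULL;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];

		if (pipe_ctx->stream != stream)	/* abbreviated filter */
			continue;
		if (!pipe_to_program) {
			pipe_to_program = pipe_ctx;
			core_dc->hwss.pipe_control_lock(core_dc,
					pipe_to_program, true);
		}
		core_dc->hwss.set_cursor_position(pipe_ctx);
	}
	if (pipe_to_program)
		core_dc->hwss.pipe_control_lock(core_dc,
				pipe_to_program, false);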
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index ade5b8e..68a71ad 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -38,6 +38,12 @@
static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
{
plane_state->ctx = ctx;
+
+ plane_state->gamma_correction = dc_create_gamma();
+ plane_state->gamma_correction->is_identity = true;
+
+ plane_state->in_transfer_func = dc_create_transfer_func();
+ plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
}
static void destruct(struct dc_plane_state *plane_state)
@@ -66,8 +72,8 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc)
{
struct dc *core_dc = dc;
- struct dc_plane_state *plane_state = kzalloc(sizeof(*plane_state),
- GFP_KERNEL);
+ struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state),
+ GFP_KERNEL);
if (NULL == plane_state)
return NULL;
@@ -120,7 +126,7 @@ static void dc_plane_state_free(struct kref *kref)
{
struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount);
destruct(plane_state);
- kfree(plane_state);
+ kvfree(plane_state);
}
void dc_plane_state_release(struct dc_plane_state *plane_state)
@@ -136,7 +142,7 @@ void dc_gamma_retain(struct dc_gamma *gamma)
static void dc_gamma_free(struct kref *kref)
{
struct dc_gamma *gamma = container_of(kref, struct dc_gamma, refcount);
- kfree(gamma);
+ kvfree(gamma);
}
void dc_gamma_release(struct dc_gamma **gamma)
@@ -147,7 +153,7 @@ void dc_gamma_release(struct dc_gamma **gamma)
struct dc_gamma *dc_create_gamma(void)
{
- struct dc_gamma *gamma = kzalloc(sizeof(*gamma), GFP_KERNEL);
+ struct dc_gamma *gamma = kvzalloc(sizeof(*gamma), GFP_KERNEL);
if (gamma == NULL)
goto alloc_fail;
@@ -167,7 +173,7 @@ void dc_transfer_func_retain(struct dc_transfer_func *tf)
static void dc_transfer_func_free(struct kref *kref)
{
struct dc_transfer_func *tf = container_of(kref, struct dc_transfer_func, refcount);
- kfree(tf);
+ kvfree(tf);
}
void dc_transfer_func_release(struct dc_transfer_func *tf)
@@ -175,9 +181,9 @@ void dc_transfer_func_release(struct dc_transfer_func *tf)
kref_put(&tf->refcount, dc_transfer_func_free);
}
-struct dc_transfer_func *dc_create_transfer_func(void)
+struct dc_transfer_func *dc_create_transfer_func()
{
- struct dc_transfer_func *tf = kzalloc(sizeof(*tf), GFP_KERNEL);
+ struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL);
if (tf == NULL)
goto alloc_fail;
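
The allocation changes in this file swap kzalloc/kfree for kvzalloc/kvfree:
kvzalloc tries kmalloc first and falls back to vmalloc when contiguous pages
are scarce, which suits these potentially large, non-DMA structures. The two
must stay paired, as in this sketch:

	/* kvfree() frees both kmalloc- and vmalloc-backed pointers;
	 * plain kfree() must not be used on a kvzalloc() result. */
	struct dc_gamma *gamma = kvzalloc(sizeof(*gamma), GFP_KERNEL);

	if (!gamma)
		return NULL;
	/* ... use gamma ... */
	kvfree(gamma);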
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index fa4b3c8..cd4f434 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
#include "inc/compressor.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.1.38"
+#define DC_VER "3.1.44"
#define MAX_SURFACES 3
#define MAX_STREAMS 6
@@ -202,6 +202,7 @@ struct dc_debug {
bool timing_trace;
bool clock_trace;
bool validation_trace;
+ bool bandwidth_calcs_trace;
/* stutter efficiency related */
bool disable_stutter;
@@ -332,20 +333,6 @@ enum {
TRANSFER_FUNC_POINTS = 1025
};
-// Moved here from color module for linux
-enum color_transfer_func {
- transfer_func_unknown,
- transfer_func_srgb,
- transfer_func_bt709,
- transfer_func_pq2084,
- transfer_func_pq2084_interim,
- transfer_func_linear_0_1,
- transfer_func_linear_0_125,
- transfer_func_dolbyvision,
- transfer_func_gamma_22,
- transfer_func_gamma_26
-};
-
struct dc_hdr_static_metadata {
/* display chromaticities and white point in units of 0.00001 */
unsigned int chromaticity_green_x;
@@ -361,9 +348,6 @@ struct dc_hdr_static_metadata {
uint32_t max_luminance;
uint32_t maximum_content_light_level;
uint32_t maximum_frame_average_light_level;
-
- bool hdr_supported;
- bool is_hdr;
};
enum dc_transfer_func_type {
@@ -419,7 +403,6 @@ union surface_update_flags {
/* Medium updates */
uint32_t dcc_change:1;
uint32_t color_space_change:1;
- uint32_t input_tf_change:1;
uint32_t horizontal_mirror_change:1;
uint32_t per_pixel_alpha_change:1;
uint32_t rotation_change:1;
@@ -428,6 +411,7 @@ union surface_update_flags {
uint32_t position_change:1;
uint32_t in_transfer_func_change:1;
uint32_t input_csc_change:1;
+ uint32_t coeff_reduction_change:1;
uint32_t output_tf_change:1;
uint32_t pixel_format_change:1;
@@ -460,7 +444,7 @@ struct dc_plane_state {
struct dc_gamma *gamma_correction;
struct dc_transfer_func *in_transfer_func;
struct dc_bias_and_scale *bias_and_scale;
- struct csc_transform input_csc_color_matrix;
+ struct dc_csc_transform input_csc_color_matrix;
struct fixed31_32 coeff_reduction_factor;
uint32_t sdr_white_level;
@@ -468,7 +452,6 @@ struct dc_plane_state {
struct dc_hdr_static_metadata hdr_static_ctx;
enum dc_color_space color_space;
- enum color_transfer_func input_tf;
enum surface_pixel_format format;
enum dc_rotation_angle rotation;
@@ -498,7 +481,6 @@ struct dc_plane_info {
enum dc_rotation_angle rotation;
enum plane_stereo_format stereo_format;
enum dc_color_space color_space;
- enum color_transfer_func input_tf;
unsigned int sdr_white_level;
bool horizontal_mirror;
bool visible;
@@ -525,10 +507,9 @@ struct dc_surface_update {
* null means no updates
*/
struct dc_gamma *gamma;
- enum color_transfer_func color_input_tf;
struct dc_transfer_func *in_transfer_func;
- struct csc_transform *input_csc_color_matrix;
+ struct dc_csc_transform *input_csc_color_matrix;
struct fixed31_32 *coeff_reduction_factor;
};
@@ -699,6 +680,7 @@ struct dc_cursor {
struct dc_cursor_attributes attributes;
};
+
/*******************************************************************************
* Interrupt interfaces
******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 48e1fcf5..bd0fda0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -117,6 +117,65 @@ uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
return reg_val;
}
+uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+ uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+ uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
+ uint8_t shift6, uint32_t mask6, uint32_t *field_value6)
+{
+ uint32_t reg_val = dm_read_reg(ctx, addr);
+ *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+ *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+ *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+ *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
+ *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
+ *field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
+ return reg_val;
+}
+
+uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+ uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+ uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
+ uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
+ uint8_t shift7, uint32_t mask7, uint32_t *field_value7)
+{
+ uint32_t reg_val = dm_read_reg(ctx, addr);
+ *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+ *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+ *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+ *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
+ *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
+ *field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
+ *field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
+ return reg_val;
+}
+
+uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+ uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+ uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
+ uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
+ uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
+ uint8_t shift8, uint32_t mask8, uint32_t *field_value8)
+{
+ uint32_t reg_val = dm_read_reg(ctx, addr);
+ *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+ *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+ *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+ *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
+ *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
+ *field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
+ *field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
+ *field_value8 = get_reg_field_value_ex(reg_val, mask8, shift8);
+ return reg_val;
+}
/* Note: a va_args version of this would be a bad idea; since the output
 * parameters are passed by pointer, the compiler cannot check for size
 * mismatches, and such code is prone to stack-corruption bugs
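
The new six-, seven- and eight-field getters extend the existing
generic_reg_getN family while keeping a single register read per call. A
hypothetical call shape for the six-field variant; the s*/m* shift and mask
values are placeholders that in practice come from the per-ASIC SH/MASK macro
tables:

	uint32_t v1, v2, v3, v4, v5, v6;
	uint32_t reg_val = generic_reg_get6(ctx, addr,
			s1, m1, &v1,
			s2, m2, &v2,
			s3, m3, &v3,
			s4, m4, &v4,
			s5, m5, &v5,
			s6, m6, &v6);	/* one dm_read_reg, six decodes */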
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index b83a7dc..b1f7057 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -423,6 +423,11 @@ enum dc_gamma_type {
GAMMA_CS_TFM_1D = 3,
};
+struct dc_csc_transform {
+ uint16_t matrix[12];
+ bool enable_adjustment;
+};
+
struct dc_gamma {
struct kref refcount;
enum dc_gamma_type type;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index dc34515..8a716baa 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -51,6 +51,14 @@ struct link_mst_stream_allocation_table {
struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
};
+struct time_stamp {
+ uint64_t edp_poweroff;
+ uint64_t edp_poweron;
+};
+
+struct link_trace {
+ struct time_stamp time_stamp;
+};
/*
* A link contains one or more sinks and their connected status.
* The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
@@ -114,6 +122,7 @@ struct dc_link {
struct dc_link_status link_status;
+ struct link_trace link_trace;
};
const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index d017df5..d7e6d53 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -58,18 +58,20 @@ struct dc_stream_state {
struct freesync_context freesync_ctx;
- struct dc_hdr_static_metadata hdr_static_metadata;
+ struct dc_info_packet hdr_static_metadata;
struct dc_transfer_func *out_transfer_func;
struct colorspace_transform gamut_remap_matrix;
- struct csc_transform csc_color_matrix;
+ struct dc_csc_transform csc_color_matrix;
enum dc_color_space output_color_space;
enum dc_dither_option dither_option;
enum view_3d_format view_format;
- enum color_transfer_func output_tf;
bool ignore_msa_timing_param;
+
+ unsigned long long periodic_fn_vsync_delta;
+
/* TODO: custom INFO packets */
/* TODO: ABM info (DMCU) */
/* PSR info */
@@ -110,9 +112,10 @@ struct dc_stream_update {
struct rect src;
struct rect dst;
struct dc_transfer_func *out_transfer_func;
- struct dc_hdr_static_metadata *hdr_static_metadata;
- enum color_transfer_func color_output_tf;
+ struct dc_info_packet *hdr_static_metadata;
unsigned int *abm_level;
+
+ unsigned long long *periodic_fn_vsync_delta;
};
bool dc_is_stream_unchanged(
@@ -131,13 +134,6 @@ bool dc_is_stream_scaling_unchanged(
* This does not trigger a flip. No surface address is programmed.
*/
-bool dc_commit_planes_to_stream(
- struct dc *dc,
- struct dc_plane_state **plane_states,
- uint8_t new_plane_count,
- struct dc_stream_state *dc_stream,
- struct dc_state *state);
-
void dc_commit_updates_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
@@ -209,14 +205,6 @@ bool dc_add_all_planes_for_stream(
enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream);
/*
- * This function takes a stream and checks if it is guaranteed to be supported.
- * Guaranteed means that MAX_COFUNC similar streams are supported.
- *
- * After this call:
- * No hardware is programmed for call. Only validation is done.
- */
-
-/*
* Set up streams and links associated to drive sinks
* The streams parameter is an absolute set of all active streams.
*
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 9441305..9defe3b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -370,12 +370,6 @@ struct dc_csc_adjustments {
struct fixed31_32 hue;
};
-enum {
- MAX_LANES = 2,
- MAX_COFUNC_PATH = 6,
- LAYER_INDEX_PRIMARY = -1,
-};
-
enum dpcd_downstream_port_max_bpc {
DOWN_STREAM_MAX_8BPC = 0,
DOWN_STREAM_MAX_10BPC,
@@ -530,6 +524,15 @@ struct vrr_params {
uint32_t frame_counter;
};
+struct dc_info_packet {
+ bool valid;
+ uint8_t hb0;
+ uint8_t hb1;
+ uint8_t hb2;
+ uint8_t hb3;
+ uint8_t sb[32];
+};
+
#define DC_PLANE_UPDATE_TIMES_MAX 10
struct dc_plane_flip_time {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 6d5cdcd..7f6d724 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -33,8 +33,9 @@
#define CTX \
aud->base.ctx
-#define DC_LOGGER \
- aud->base.ctx->logger
+
+#define DC_LOGGER_INIT()
+
#define REG(reg)\
(aud->regs->reg)
@@ -348,8 +349,8 @@ static void set_audio_latency(
void dce_aud_az_enable(struct audio *audio)
{
- struct dce_audio *aud = DCE_AUD(audio);
uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+ DC_LOGGER_INIT();
set_reg_field_value(value, 1,
AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
@@ -371,7 +372,7 @@ void dce_aud_az_enable(struct audio *audio)
void dce_aud_az_disable(struct audio *audio)
{
uint32_t value;
- struct dce_audio *aud = DCE_AUD(audio);
+ DC_LOGGER_INIT();
value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
set_reg_field_value(value, 1,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 0aa2cda..223db98 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -41,8 +41,9 @@
#define CTX \
clk_src->base.ctx
-#define DC_LOGGER \
- calc_pll_cs->ctx->logger
+
+#define DC_LOGGER_INIT()
+
#undef FN
#define FN(reg_name, field_name) \
clk_src->cs_shift->field_name, clk_src->cs_mask->field_name
@@ -467,7 +468,7 @@ static uint32_t dce110_get_pix_clk_dividers_helper (
{
uint32_t field = 0;
uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
- struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll;
+ DC_LOGGER_INIT();
/* Check if reference clock is external (not pcie/xtalin)
* HW Dce80 spec:
* 00 - PCIE_REFCLK, 01 - XTALIN, 02 - GENERICA, 03 - GENERICB
@@ -557,8 +558,8 @@ static uint32_t dce110_get_pix_clk_dividers(
struct pll_settings *pll_settings)
{
struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs);
- struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll;
uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
+ DC_LOGGER_INIT();
if (pix_clk_params == NULL || pll_settings == NULL
|| pix_clk_params->requested_pix_clk == 0) {
@@ -589,6 +590,9 @@ static uint32_t dce110_get_pix_clk_dividers(
pll_settings, pix_clk_params);
break;
case DCE_VERSION_11_2:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case DCE_VERSION_11_22:
+#endif
case DCE_VERSION_12_0:
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case DCN_VERSION_1_0:
@@ -978,6 +982,9 @@ static bool dce110_program_pix_clk(
break;
case DCE_VERSION_11_2:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case DCE_VERSION_11_22:
+#endif
case DCE_VERSION_12_0:
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case DCN_VERSION_1_0:
@@ -1054,7 +1061,7 @@ static void get_ss_info_from_atombios(
struct spread_spectrum_info *ss_info_cur;
struct spread_spectrum_data *ss_data_cur;
uint32_t i;
- struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll;
+ DC_LOGGER_INIT();
if (ss_entries_num == NULL) {
DC_LOG_SYNC(
"Invalid entry !!!\n");
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
index 4877243..0275d6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
@@ -53,7 +53,8 @@ void dce_pipe_control_lock(struct dc *dc,
struct dce_hwseq *hws = dc->hwseq;
	/* Do not lock the pipe when it is blanked */
- if (lock && pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg))
+ if (lock && pipe->stream_res.tg->funcs->is_blanked &&
+ pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg))
return;
val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst],
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 8167cad..dbe3b26 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -113,6 +113,7 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
.connect_dig_be_to_fe = dce110_link_encoder_connect_dig_be_to_fe,
.enable_hpd = dce110_link_encoder_enable_hpd,
.disable_hpd = dce110_link_encoder_disable_hpd,
+ .is_dig_enabled = dce110_is_dig_enabled,
.destroy = dce110_link_encoder_destroy
};
@@ -535,8 +536,9 @@ void dce110_psr_program_secondary_packet(struct link_encoder *enc,
DP_SEC_GSP0_PRIORITY, 1);
}
-static bool is_dig_enabled(const struct dce110_link_encoder *enc110)
+bool dce110_is_dig_enabled(struct link_encoder *enc)
{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
uint32_t value;
REG_GET(DIG_BE_EN_CNTL, DIG_ENABLE, &value);
@@ -1031,7 +1033,7 @@ void dce110_link_encoder_disable_output(
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;
- if (!is_dig_enabled(enc110)) {
+ if (!dce110_is_dig_enabled(enc)) {
/* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
return;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index 0ec3433..3470694 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -263,4 +263,6 @@ void dce110_psr_program_dp_dphy_fast_training(struct link_encoder *enc,
void dce110_psr_program_secondary_packet(struct link_encoder *enc,
unsigned int sdp_transmit_line_num_deadline);
+bool dce110_is_dig_enabled(struct link_encoder *enc);
+
#endif /* __DC_LINK_ENCODER__DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 0790f25..b235a75 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -174,6 +174,25 @@ static void program_urgency_watermark(
URGENCY_HIGH_WATERMARK, urgency_high_wm);
}
+static void dce120_program_urgency_watermark(
+ struct dce_mem_input *dce_mi,
+ uint32_t wm_select,
+ uint32_t urgency_low_wm,
+ uint32_t urgency_high_wm)
+{
+ REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
+ URGENCY_WATERMARK_MASK, wm_select);
+
+ REG_SET_2(DPG_PIPE_URGENCY_CONTROL, 0,
+ URGENCY_LOW_WATERMARK, urgency_low_wm,
+ URGENCY_HIGH_WATERMARK, urgency_high_wm);
+
+ REG_SET_2(DPG_PIPE_URGENT_LEVEL_CONTROL, 0,
+ URGENT_LEVEL_LOW_WATERMARK, urgency_low_wm,
+ URGENT_LEVEL_HIGH_WATERMARK, urgency_high_wm);
+
+}
+
static void program_nbp_watermark(
struct dce_mem_input *dce_mi,
uint32_t wm_select,
@@ -206,6 +225,25 @@ static void program_nbp_watermark(
}
}
+static void dce120_program_stutter_watermark(
+ struct dce_mem_input *dce_mi,
+ uint32_t wm_select,
+ uint32_t stutter_mark,
+ uint32_t stutter_entry)
+{
+ REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, wm_select);
+
+ if (REG(DPG_PIPE_STUTTER_CONTROL2))
+ REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL2,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark,
+ STUTTER_ENTER_SELF_REFRESH_WATERMARK, stutter_entry);
+ else
+ REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark,
+ STUTTER_ENTER_SELF_REFRESH_WATERMARK, stutter_entry);
+}
+
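
The stutter helper above probes for the newer DPG_PIPE_STUTTER_CONTROL2
register and falls back to the legacy control when it is absent. The assumed
mechanics of that probe: REG(x) resolves to the per-ASIC offset of x and
evaluates to 0 when the ASIC's register list omits it, so a plain truth test
selects the path:

	/* Assumed REG() semantics (offset lookup, 0 when absent); this is
	 * how one helper can serve ASICs with different register sets. */
	bool has_ctrl2 = REG(DPG_PIPE_STUTTER_CONTROL2) != 0;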
static void program_stutter_watermark(
struct dce_mem_input *dce_mi,
uint32_t wm_select,
@@ -225,7 +263,8 @@ static void program_stutter_watermark(
static void dce_mi_program_display_marks(
struct mem_input *mi,
struct dce_watermarks nbp,
- struct dce_watermarks stutter,
+ struct dce_watermarks stutter_exit,
+ struct dce_watermarks stutter_enter,
struct dce_watermarks urgent,
uint32_t total_dest_line_time_ns)
{
@@ -243,13 +282,14 @@ static void dce_mi_program_display_marks(
program_nbp_watermark(dce_mi, 2, nbp.a_mark); /* set a */
program_nbp_watermark(dce_mi, 1, nbp.d_mark); /* set d */
- program_stutter_watermark(dce_mi, 2, stutter.a_mark); /* set a */
- program_stutter_watermark(dce_mi, 1, stutter.d_mark); /* set d */
+ program_stutter_watermark(dce_mi, 2, stutter_exit.a_mark); /* set a */
+ program_stutter_watermark(dce_mi, 1, stutter_exit.d_mark); /* set d */
}
-static void dce120_mi_program_display_marks(struct mem_input *mi,
+static void dce112_mi_program_display_marks(struct mem_input *mi,
struct dce_watermarks nbp,
- struct dce_watermarks stutter,
+ struct dce_watermarks stutter_exit,
+ struct dce_watermarks stutter_entry,
struct dce_watermarks urgent,
uint32_t total_dest_line_time_ns)
{
@@ -273,10 +313,43 @@ static void dce120_mi_program_display_marks(struct mem_input *mi,
program_nbp_watermark(dce_mi, 2, nbp.c_mark); /* set c */
program_nbp_watermark(dce_mi, 3, nbp.d_mark); /* set d */
- program_stutter_watermark(dce_mi, 0, stutter.a_mark); /* set a */
- program_stutter_watermark(dce_mi, 1, stutter.b_mark); /* set b */
- program_stutter_watermark(dce_mi, 2, stutter.c_mark); /* set c */
- program_stutter_watermark(dce_mi, 3, stutter.d_mark); /* set d */
+ program_stutter_watermark(dce_mi, 0, stutter_exit.a_mark); /* set a */
+ program_stutter_watermark(dce_mi, 1, stutter_exit.b_mark); /* set b */
+ program_stutter_watermark(dce_mi, 2, stutter_exit.c_mark); /* set c */
+ program_stutter_watermark(dce_mi, 3, stutter_exit.d_mark); /* set d */
+}
+
+static void dce120_mi_program_display_marks(struct mem_input *mi,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter_exit,
+ struct dce_watermarks stutter_entry,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1;
+
+ dce120_program_urgency_watermark(dce_mi, 0, /* set a */
+ urgent.a_mark, total_dest_line_time_ns);
+ dce120_program_urgency_watermark(dce_mi, 1, /* set b */
+ urgent.b_mark, total_dest_line_time_ns);
+ dce120_program_urgency_watermark(dce_mi, 2, /* set c */
+ urgent.c_mark, total_dest_line_time_ns);
+ dce120_program_urgency_watermark(dce_mi, 3, /* set d */
+ urgent.d_mark, total_dest_line_time_ns);
+
+ REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE, stutter_en,
+ STUTTER_IGNORE_FBC, 1);
+ program_nbp_watermark(dce_mi, 0, nbp.a_mark); /* set a */
+ program_nbp_watermark(dce_mi, 1, nbp.b_mark); /* set b */
+ program_nbp_watermark(dce_mi, 2, nbp.c_mark); /* set c */
+ program_nbp_watermark(dce_mi, 3, nbp.d_mark); /* set d */
+
+ dce120_program_stutter_watermark(dce_mi, 0, stutter_exit.a_mark, stutter_entry.a_mark); /* set a */
+ dce120_program_stutter_watermark(dce_mi, 1, stutter_exit.b_mark, stutter_entry.b_mark); /* set b */
+ dce120_program_stutter_watermark(dce_mi, 2, stutter_exit.c_mark, stutter_entry.c_mark); /* set c */
+ dce120_program_stutter_watermark(dce_mi, 3, stutter_exit.d_mark, stutter_entry.d_mark); /* set d */
}
static void program_tiling(
@@ -696,5 +769,17 @@ void dce112_mem_input_construct(
const struct dce_mem_input_mask *mi_mask)
{
dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
+ dce_mi->base.funcs->mem_input_program_display_marks = dce112_mi_program_display_marks;
+}
+
+void dce120_mem_input_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_mem_input_registers *regs,
+ const struct dce_mem_input_shift *mi_shift,
+ const struct dce_mem_input_mask *mi_mask)
+{
+ dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks;
}
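dce120_program_stutter_watermark above programs the newer DPG_PIPE_STUTTER_CONTROL2 register when the ASIC defines it and falls back to DPG_PIPE_STUTTER_CONTROL otherwise. A hedged sketch of that presence-check-and-fallback pattern, with a fake register map in place of the REG()/REG_UPDATE_2 macros:

#include <stdint.h>
#include <stdio.h>

/* hypothetical register map: offset 0 means "not present on this ASIC",
 * mirroring the REG(DPG_PIPE_STUTTER_CONTROL2) presence check */
struct mi_regs {
	uint32_t stutter_control;    /* DPG_PIPE_STUTTER_CONTROL  */
	uint32_t stutter_control2;   /* DPG_PIPE_STUTTER_CONTROL2 */
};

static void reg_update2(uint32_t offset, uint32_t exit_wm, uint32_t enter_wm)
{
	/* stand-in for REG_UPDATE_2(); real code does a read-modify-write */
	printf("reg 0x%02x: exit_wm=%u enter_wm=%u\n",
	       (unsigned)offset, exit_wm, enter_wm);
}

static void program_stutter(const struct mi_regs *regs,
			    uint32_t exit_wm, uint32_t enter_wm)
{
	if (regs->stutter_control2)
		reg_update2(regs->stutter_control2, exit_wm, enter_wm);
	else
		reg_update2(regs->stutter_control, exit_wm, enter_wm);
}

int main(void)
{
	struct mi_regs with_c2 = { .stutter_control = 0x10, .stutter_control2 = 0x14 };
	struct mi_regs without = { .stutter_control = 0x10 };

	program_stutter(&with_c2, 100, 90);  /* uses CONTROL2 */
	program_stutter(&without, 100, 90);  /* falls back to CONTROL */
	return 0;
}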
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
index 05d39c0..d15b0d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
@@ -106,6 +106,7 @@ struct dce_mem_input_registers {
uint32_t DPG_PIPE_ARBITRATION_CONTROL1;
uint32_t DPG_WATERMARK_MASK_CONTROL;
uint32_t DPG_PIPE_URGENCY_CONTROL;
+ uint32_t DPG_PIPE_URGENT_LEVEL_CONTROL;
uint32_t DPG_PIPE_NB_PSTATE_CHANGE_CONTROL;
uint32_t DPG_PIPE_LOW_POWER_CONTROL;
uint32_t DPG_PIPE_STUTTER_CONTROL;
@@ -213,6 +214,11 @@ struct dce_mem_input_registers {
#define MI_DCE12_DMIF_PG_MASK_SH_LIST(mask_sh, blk)\
SFB(blk, DPG_PIPE_STUTTER_CONTROL2, STUTTER_EXIT_SELF_REFRESH_WATERMARK, mask_sh),\
+ SFB(blk, DPG_PIPE_STUTTER_CONTROL2, STUTTER_ENTER_SELF_REFRESH_WATERMARK, mask_sh),\
+ SFB(blk, DPG_PIPE_URGENT_LEVEL_CONTROL, URGENT_LEVEL_LOW_WATERMARK, mask_sh),\
+ SFB(blk, DPG_PIPE_URGENT_LEVEL_CONTROL, URGENT_LEVEL_HIGH_WATERMARK, mask_sh),\
+ SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, mask_sh),\
+ SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, mask_sh),\
SFB(blk, DPG_WATERMARK_MASK_CONTROL, PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_ENABLE, mask_sh),\
SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\
@@ -286,6 +292,8 @@ struct dce_mem_input_registers {
type STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK; \
type URGENCY_LOW_WATERMARK; \
type URGENCY_HIGH_WATERMARK; \
+ type URGENT_LEVEL_LOW_WATERMARK;\
+ type URGENT_LEVEL_HIGH_WATERMARK;\
type NB_PSTATE_CHANGE_ENABLE; \
type NB_PSTATE_CHANGE_URGENT_DURING_REQUEST; \
type NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST; \
@@ -297,6 +305,7 @@ struct dce_mem_input_registers {
type STUTTER_ENABLE; \
type STUTTER_IGNORE_FBC; \
type STUTTER_EXIT_SELF_REFRESH_WATERMARK; \
+ type STUTTER_ENTER_SELF_REFRESH_WATERMARK; \
type DMIF_BUFFERS_ALLOCATED; \
type DMIF_BUFFERS_ALLOCATION_COMPLETED; \
type ENABLE; /* MC_HUB_RDREQ_DMIF_LIMIT */\
@@ -344,4 +353,12 @@ void dce112_mem_input_construct(
const struct dce_mem_input_shift *mi_shift,
const struct dce_mem_input_mask *mi_mask);
+void dce120_mem_input_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_mem_input_registers *regs,
+ const struct dce_mem_input_shift *mi_shift,
+ const struct dce_mem_input_mask *mi_mask);
+
#endif /*__DCE_MEM_INPUT_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 162f6a6..e265a0a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -26,27 +26,10 @@
#include "dc_bios_types.h"
#include "dce_stream_encoder.h"
#include "reg_helper.h"
+#include "hw_shared.h"
+
#define DC_LOGGER \
enc110->base.ctx->logger
-enum DP_PIXEL_ENCODING {
-DP_PIXEL_ENCODING_RGB444 = 0x00000000,
-DP_PIXEL_ENCODING_YCBCR422 = 0x00000001,
-DP_PIXEL_ENCODING_YCBCR444 = 0x00000002,
-DP_PIXEL_ENCODING_RGB_WIDE_GAMUT = 0x00000003,
-DP_PIXEL_ENCODING_Y_ONLY = 0x00000004,
-DP_PIXEL_ENCODING_YCBCR420 = 0x00000005,
-DP_PIXEL_ENCODING_RESERVED = 0x00000006,
-};
-
-
-enum DP_COMPONENT_DEPTH {
-DP_COMPONENT_DEPTH_6BPC = 0x00000000,
-DP_COMPONENT_DEPTH_8BPC = 0x00000001,
-DP_COMPONENT_DEPTH_10BPC = 0x00000002,
-DP_COMPONENT_DEPTH_12BPC = 0x00000003,
-DP_COMPONENT_DEPTH_16BPC = 0x00000004,
-DP_COMPONENT_DEPTH_RESERVED = 0x00000005,
-};
#define REG(reg)\
@@ -80,7 +63,7 @@ enum {
static void dce110_update_generic_info_packet(
struct dce110_stream_encoder *enc110,
uint32_t packet_index,
- const struct encoder_info_packet *info_packet)
+ const struct dc_info_packet *info_packet)
{
uint32_t regval;
/* TODOFPGA Figure out a proper number for max_retries polling for lock
@@ -196,7 +179,7 @@ static void dce110_update_generic_info_packet(
static void dce110_update_hdmi_info_packet(
struct dce110_stream_encoder *enc110,
uint32_t packet_index,
- const struct encoder_info_packet *info_packet)
+ const struct dc_info_packet *info_packet)
{
uint32_t cont, send, line;
@@ -314,11 +297,11 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
switch (crtc_timing->pixel_encoding) {
case PIXEL_ENCODING_YCBCR422:
REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_YCBCR422);
+ DP_PIXEL_ENCODING_TYPE_YCBCR422);
break;
case PIXEL_ENCODING_YCBCR444:
REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_YCBCR444);
+ DP_PIXEL_ENCODING_TYPE_YCBCR444);
if (crtc_timing->flags.Y_ONLY)
if (crtc_timing->display_color_depth != COLOR_DEPTH_666)
@@ -326,7 +309,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
* Color depth of Y-only could be
* 8, 10, 12, 16 bits */
REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_Y_ONLY);
+ DP_PIXEL_ENCODING_TYPE_Y_ONLY);
/* Note: DP_MSA_MISC1 bit 7 is the indicator
* of Y-only mode.
* This bit is set in HW if register
@@ -334,7 +317,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
break;
case PIXEL_ENCODING_YCBCR420:
REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_YCBCR420);
+ DP_PIXEL_ENCODING_TYPE_YCBCR420);
if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1);
@@ -345,7 +328,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
break;
default:
REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
- DP_PIXEL_ENCODING_RGB444);
+ DP_PIXEL_ENCODING_TYPE_RGB444);
break;
}
@@ -363,20 +346,20 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
break;
case COLOR_DEPTH_888:
REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- DP_COMPONENT_DEPTH_8BPC);
+ DP_COMPONENT_PIXEL_DEPTH_8BPC);
break;
case COLOR_DEPTH_101010:
REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- DP_COMPONENT_DEPTH_10BPC);
+ DP_COMPONENT_PIXEL_DEPTH_10BPC);
break;
case COLOR_DEPTH_121212:
REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- DP_COMPONENT_DEPTH_12BPC);
+ DP_COMPONENT_PIXEL_DEPTH_12BPC);
break;
default:
REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
- DP_COMPONENT_DEPTH_6BPC);
+ DP_COMPONENT_PIXEL_DEPTH_6BPC);
break;
}
@@ -836,7 +819,7 @@ static void dce110_stream_encoder_update_dp_info_packets(
const struct encoder_info_frame *info_frame)
{
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
- uint32_t value = REG_READ(DP_SEC_CNTL);
+ uint32_t value = 0;
if (info_frame->vsc.valid)
dce110_update_generic_info_packet(
@@ -870,6 +853,7 @@ static void dce110_stream_encoder_update_dp_info_packets(
* Therefore we need to enable the master bit
* if at least one of the fields is not 0
*/
+ value = REG_READ(DP_SEC_CNTL);
if (value)
REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
}
@@ -879,7 +863,7 @@ static void dce110_stream_encoder_stop_dp_info_packets(
{
/* stop generic packets on DP */
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
- uint32_t value = REG_READ(DP_SEC_CNTL);
+ uint32_t value = 0;
if (enc110->se_mask->DP_SEC_AVI_ENABLE) {
REG_SET_7(DP_SEC_CNTL, 0,
@@ -892,25 +876,10 @@ static void dce110_stream_encoder_stop_dp_info_packets(
DP_SEC_STREAM_ENABLE, 0);
}
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
- if (enc110->se_mask->DP_SEC_GSP7_ENABLE) {
- REG_SET_10(DP_SEC_CNTL, 0,
- DP_SEC_GSP0_ENABLE, 0,
- DP_SEC_GSP1_ENABLE, 0,
- DP_SEC_GSP2_ENABLE, 0,
- DP_SEC_GSP3_ENABLE, 0,
- DP_SEC_GSP4_ENABLE, 0,
- DP_SEC_GSP5_ENABLE, 0,
- DP_SEC_GSP6_ENABLE, 0,
- DP_SEC_GSP7_ENABLE, 0,
- DP_SEC_MPG_ENABLE, 0,
- DP_SEC_STREAM_ENABLE, 0);
- }
-#endif
/* this register is shared with the audio info frame.
* therefore we need to keep master enabled
* if at least one of the fields is not 0 */
-
+ value = REG_READ(DP_SEC_CNTL);
if (value)
REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
@@ -1513,7 +1482,7 @@ static void dce110_se_disable_dp_audio(
struct stream_encoder *enc)
{
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
- uint32_t value = REG_READ(DP_SEC_CNTL);
+ uint32_t value = 0;
/* Disable Audio packets */
REG_UPDATE_5(DP_SEC_CNTL,
@@ -1525,6 +1494,7 @@ static void dce110_se_disable_dp_audio(
/* This register is shared with the encoder info frame. Therefore we need to
keep master enabled if at least one of the fields is not 0 */
+ value = REG_READ(DP_SEC_CNTL);
if (value != 0)
REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
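The three hunks above all move the REG_READ(DP_SEC_CNTL) from the top of the function to just before the master-enable decision, so the check sees the register state after the packet-enable fields were cleared. A small sketch of the corrected ordering against a fake register (the bit positions are illustrative, not the real layout):

#include <stdint.h>
#include <stdio.h>

#define DP_SEC_GSP0_ENABLE   (1u << 0)
#define DP_SEC_STREAM_ENABLE (1u << 31)

static uint32_t dp_sec_cntl;   /* fake register backing store */

static void disable_gsp0_keep_master(void)
{
	dp_sec_cntl &= ~DP_SEC_GSP0_ENABLE;  /* clear the packet field first */

	/* sample the register only *after* the update, so the master-enable
	 * decision reflects the new state; reading up front (as the old
	 * code did) could re-assert the master bit from stale fields */
	if (dp_sec_cntl)
		dp_sec_cntl |= DP_SEC_STREAM_ENABLE;
}

int main(void)
{
	dp_sec_cntl = DP_SEC_GSP0_ENABLE;    /* only GSP0 was on */
	disable_gsp0_keep_master();
	printf("DP_SEC_CNTL = 0x%08x\n", dp_sec_cntl);  /* stays 0: master off */
	return 0;
}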
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 3092f76..38ec0d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -733,38 +733,6 @@ enum dc_status dce100_add_stream_to_ctx(
return result;
}
-enum dc_status dce100_validate_guaranteed(
- struct dc *dc,
- struct dc_stream_state *dc_stream,
- struct dc_state *context)
-{
- enum dc_status result = DC_ERROR_UNEXPECTED;
-
- context->streams[0] = dc_stream;
- dc_stream_retain(context->streams[0]);
- context->stream_count++;
-
- result = resource_map_pool_resources(dc, context, dc_stream);
-
- if (result == DC_OK)
- result = resource_map_clock_resources(dc, context, dc_stream);
-
- if (result == DC_OK)
- result = build_mapped_resource(dc, context, dc_stream);
-
- if (result == DC_OK) {
- validate_guaranteed_copy_streams(
- context, dc->caps.max_streams);
- result = resource_build_scaling_params_for_context(dc, context);
- }
-
- if (result == DC_OK)
- if (!dce100_validate_bandwidth(dc, context))
- result = DC_FAIL_BANDWIDTH_VALIDATE;
-
- return result;
-}
-
static void dce100_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
@@ -786,7 +754,6 @@ enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, s
static const struct resource_funcs dce100_res_pool_funcs = {
.destroy = dce100_destroy_resource_pool,
.link_enc_create = dce100_link_encoder_create,
- .validate_guaranteed = dce100_validate_guaranteed,
.validate_bandwidth = dce100_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce100_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index d057599..2288d0a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -70,8 +70,9 @@
#define CTX \
hws->ctx
-#define DC_LOGGER \
- ctx->logger
+
+#define DC_LOGGER_INIT()
+
#define REG(reg)\
hws->regs->reg
@@ -279,7 +280,9 @@ dce110_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
build_prescale_params(&prescale_params, plane_state);
ipp->funcs->ipp_program_prescale(ipp, &prescale_params);
- if (plane_state->gamma_correction && dce_use_lut(plane_state->format))
+ if (plane_state->gamma_correction &&
+ !plane_state->gamma_correction->is_identity &&
+ dce_use_lut(plane_state->format))
ipp->funcs->ipp_program_input_lut(ipp, plane_state->gamma_correction);
if (tf == NULL) {
@@ -851,6 +854,28 @@ void hwss_edp_power_control(
if (power_up != is_panel_powered_on(hwseq)) {
/* Send VBIOS command to prompt eDP panel power */
+ if (power_up) {
+ unsigned long long current_ts = dm_get_timestamp(ctx);
+ unsigned long long duration_in_ms =
+ dm_get_elapse_time_in_ns(
+ ctx,
+ current_ts,
+ div64_u64(link->link_trace.time_stamp.edp_poweroff, 1000000));
+ unsigned long long wait_time_ms = 0;
+
+ /* max 500ms from LCDVDD off to on */
+ if (link->link_trace.time_stamp.edp_poweroff == 0)
+ wait_time_ms = 500;
+ else if (duration_in_ms < 500)
+ wait_time_ms = 500 - duration_in_ms;
+
+ if (wait_time_ms) {
+ msleep(wait_time_ms);
+ dm_output_to_console("%s: wait %lld ms to power on eDP.\n",
+ __func__, wait_time_ms);
+ }
+
+ }
DC_LOG_HW_RESUME_S3(
"%s: Panel Power action: %s\n",
@@ -864,9 +889,14 @@ void hwss_edp_power_control(
cntl.coherent = false;
cntl.lanes_number = LANE_COUNT_FOUR;
cntl.hpd_sel = link->link_enc->hpd_source;
-
bp_result = link_transmitter_control(ctx->dc_bios, &cntl);
+ if (!power_up)
+ /* save driver power-off time stamp */
+ link->link_trace.time_stamp.edp_poweroff = dm_get_timestamp(ctx);
+ else
+ link->link_trace.time_stamp.edp_poweron = dm_get_timestamp(ctx);
+
if (bp_result != BP_RESULT_OK)
DC_LOG_ERROR(
"%s: Panel Power bp_result: %d\n",
@@ -1011,7 +1041,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
link->dc->hwss.edp_backlight_control(link, true);
- stream->bl_pwm_level = 0;
+ stream->bl_pwm_level = EDP_BACKLIGHT_RAMP_DISABLE_LEVEL;
}
}
void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
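The added block in hwss_edp_power_control enforces the panel requirement of at least 500 ms between LCDVDD off and on, using the power-off timestamp that the same patch starts recording. A sketch of the delay computation, assuming millisecond timestamps and POSIX clock_gettime in place of dm_get_timestamp()/dm_get_elapse_time_in_ns():

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define EDP_MIN_OFF_TO_ON_MS 500u   /* max 500ms from LCDVDD off to on */

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000u + (uint64_t)ts.tv_nsec / 1000000u;
}

/* returns how long to sleep before powering the panel back on, given the
 * timestamp recorded when it was last powered off (0 = never recorded,
 * so assume the worst case) */
static uint64_t edp_power_on_delay_ms(uint64_t poweroff_ts_ms)
{
	uint64_t elapsed;

	if (poweroff_ts_ms == 0)
		return EDP_MIN_OFF_TO_ON_MS;

	elapsed = now_ms() - poweroff_ts_ms;
	return elapsed < EDP_MIN_OFF_TO_ON_MS ?
	       EDP_MIN_OFF_TO_ON_MS - elapsed : 0;
}

int main(void)
{
	printf("wait %llu ms\n",
	       (unsigned long long)edp_power_on_delay_ms(0));            /* 500 */
	printf("wait %llu ms\n",
	       (unsigned long long)edp_power_on_delay_ms(now_ms() - 200)); /* ~300 */
	return 0;
}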
@@ -1203,7 +1233,7 @@ static void program_scaler(const struct dc *dc,
&pipe_ctx->plane_res.scl_data);
}
-static enum dc_status dce110_prog_pixclk_crtc_otg(
+static enum dc_status dce110_enable_stream_timing(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
struct dc *dc)
@@ -1269,7 +1299,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx[pipe_ctx->pipe_idx];
/* */
- dc->hwss.prog_pixclk_crtc_otg(pipe_ctx, context, dc);
+ dc->hwss.enable_stream_timing(pipe_ctx, context, dc);
/* FPGA does not program backend */
if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
@@ -1441,6 +1471,17 @@ static void disable_vga_and_power_gate_all_controllers(
}
}
+static struct dc_link *get_link_for_edp(struct dc *dc)
+{
+ int i;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP)
+ return dc->links[i];
+ }
+ return NULL;
+}
+
static struct dc_link *get_link_for_edp_not_in_use(
struct dc *dc,
struct dc_state *context)
@@ -1475,20 +1516,21 @@ static struct dc_link *get_link_for_edp_not_in_use(
*/
void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
{
- struct dc_bios *dcb = dc->ctx->dc_bios;
-
- /* vbios already light up eDP, so we can leverage vbios and skip eDP
- * programming
- */
- bool can_eDP_fast_boot_optimize =
- (dcb->funcs->get_vga_enabled_displays(dc->ctx->dc_bios) == ATOM_DISPLAY_LCD1_ACTIVE);
-
- /* if OS doesn't light up eDP and eDP link is available, we want to disable */
struct dc_link *edp_link_to_turnoff = NULL;
+ struct dc_link *edp_link = get_link_for_edp(dc);
+ bool can_eDP_fast_boot_optimize = false;
+
+ if (edp_link) {
+ can_eDP_fast_boot_optimize =
+ edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc);
+ }
if (can_eDP_fast_boot_optimize) {
edp_link_to_turnoff = get_link_for_edp_not_in_use(dc, context);
+ /* if the OS doesn't light up eDP and the eDP link is available,
+ * we want to disable it. When resuming from S4/S5, the
+ * optimization should still be applied.
+ */
if (!edp_link_to_turnoff)
dc->apply_edp_fast_boot_optimization = true;
}
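Fast-boot eligibility is now derived from whether firmware already enabled the eDP DIG, rather than from the VBIOS VGA-display query. A simplified sketch of that decision (hypothetical struct link; unlike the driver, which checks only the first eDP link found, this scans them all):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct link {
	bool is_edp;
	bool (*is_dig_enabled)(const struct link *link);
};

static bool dig_on(const struct link *l)  { (void)l; return true;  }
static bool dig_off(const struct link *l) { (void)l; return false; }

/* fast boot is possible when firmware already drove an eDP DIG; then the
 * driver can keep the link up instead of retraining it */
static bool can_fast_boot(struct link *const *links, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (links[i]->is_edp && links[i]->is_dig_enabled(links[i]))
			return true;
	return false;
}

int main(void)
{
	struct link edp = { .is_edp = true,  .is_dig_enabled = dig_on  };
	struct link dp  = { .is_edp = false, .is_dig_enabled = dig_off };
	struct link *links[] = { &dp, &edp };

	printf("fast boot: %d\n", can_fast_boot(links, 2));
	return 0;
}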
@@ -1544,6 +1586,7 @@ static void dce110_set_displaymarks(
pipe_ctx->plane_res.mi,
context->bw.dce.nbp_state_change_wm_ns[num_pipes],
context->bw.dce.stutter_exit_wm_ns[num_pipes],
+ context->bw.dce.stutter_entry_wm_ns[num_pipes],
context->bw.dce.urgent_wm_ns[num_pipes],
total_dest_line_time_ns);
if (i == underlay_idx) {
@@ -1569,6 +1612,7 @@ static void set_safe_displaymarks(
MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK };
struct dce_watermarks nbp_marks = {
SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK };
+ struct dce_watermarks min_marks = { 0, 0, 0, 0};
for (i = 0; i < MAX_PIPES; i++) {
if (res_ctx->pipe_ctx[i].stream == NULL || res_ctx->pipe_ctx[i].plane_res.mi == NULL)
@@ -1578,6 +1622,7 @@ static void set_safe_displaymarks(
res_ctx->pipe_ctx[i].plane_res.mi,
nbp_marks,
max_marks,
+ min_marks,
max_marks,
MAX_WATERMARK);
@@ -1803,6 +1848,9 @@ static bool should_enable_fbc(struct dc *dc,
}
}
+ /* Pipe context should be found */
+ ASSERT(pipe_ctx);
+
/* Only supports eDP */
if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
return false;
@@ -2699,8 +2747,11 @@ static void dce110_program_front_end_for_pipe(
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct xfm_grph_csc_adjustment adjust;
struct out_csc_color_matrix tbl_entry;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
+#endif
unsigned int i;
- struct dc_context *ctx = dc->ctx;
+ DC_LOGGER_INIT();
memset(&tbl_entry, 0, sizeof(tbl_entry));
if (dc->current_state)
@@ -2740,7 +2791,9 @@ static void dce110_program_front_end_for_pipe(
program_scaler(dc, pipe_ctx);
#if defined(CONFIG_DRM_AMD_DC_FBC)
- if (dc->fbc_compressor && old_pipe->stream) {
+ /* FBC is not applicable to the underlay pipe */
+ if (dc->fbc_compressor && old_pipe->stream &&
+ pipe_ctx->pipe_idx != underlay_idx) {
if (plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
else
@@ -2776,13 +2829,13 @@ static void dce110_program_front_end_for_pipe(
dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
DC_LOG_SURFACE(
- "Pipe:%d 0x%x: addr hi:0x%x, "
+ "Pipe:%d %p: addr hi:0x%x, "
"addr low:0x%x, "
"src: %d, %d, %d,"
" %d; dst: %d, %d, %d, %d;"
"clip: %d, %d, %d, %d\n",
pipe_ctx->pipe_idx,
- pipe_ctx->plane_state,
+ (void *) pipe_ctx->plane_state,
pipe_ctx->plane_state->address.grph.addr.high_part,
pipe_ctx->plane_state->address.grph.addr.low_part,
pipe_ctx->plane_state->src_rect.x,
@@ -2993,7 +3046,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.get_position = get_position,
.set_static_screen_control = set_static_screen_control,
.reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap,
- .prog_pixclk_crtc_otg = dce110_prog_pixclk_crtc_otg,
+ .enable_stream_timing = dce110_enable_stream_timing,
.setup_stereo = NULL,
.set_avmute = dce110_set_avmute,
.wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
index 7bab8c6..0564c8e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -923,6 +923,7 @@ void dce_mem_input_v_program_display_marks(
struct mem_input *mem_input,
struct dce_watermarks nbp,
struct dce_watermarks stutter,
+ struct dce_watermarks stutter_enter,
struct dce_watermarks urgent,
uint32_t total_dest_line_time_ns)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index b1f14be..ee33786 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -930,38 +930,6 @@ static enum dc_status dce110_add_stream_to_ctx(
return result;
}
-static enum dc_status dce110_validate_guaranteed(
- struct dc *dc,
- struct dc_stream_state *dc_stream,
- struct dc_state *context)
-{
- enum dc_status result = DC_ERROR_UNEXPECTED;
-
- context->streams[0] = dc_stream;
- dc_stream_retain(context->streams[0]);
- context->stream_count++;
-
- result = resource_map_pool_resources(dc, context, dc_stream);
-
- if (result == DC_OK)
- result = resource_map_clock_resources(dc, context, dc_stream);
-
- if (result == DC_OK)
- result = build_mapped_resource(dc, context, dc_stream);
-
- if (result == DC_OK) {
- validate_guaranteed_copy_streams(
- context, dc->caps.max_streams);
- result = resource_build_scaling_params_for_context(dc, context);
- }
-
- if (result == DC_OK)
- if (!dce110_validate_bandwidth(dc, context))
- result = DC_FAIL_BANDWIDTH_VALIDATE;
-
- return result;
-}
-
static struct pipe_ctx *dce110_acquire_underlay(
struct dc_state *context,
const struct resource_pool *pool,
@@ -1036,7 +1004,6 @@ static void dce110_destroy_resource_pool(struct resource_pool **pool)
static const struct resource_funcs dce110_res_pool_funcs = {
.destroy = dce110_destroy_resource_pool,
.link_enc_create = dce110_link_encoder_create,
- .validate_guaranteed = dce110_validate_guaranteed,
.validate_bandwidth = dce110_validate_bandwidth,
.validate_plane = dce110_validate_plane,
.acquire_idle_pipe_for_layer = dce110_acquire_underlay,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
index be71539..1b2fe0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -431,14 +431,6 @@ void dce110_timing_generator_set_drr(
0,
CRTC_V_TOTAL_CONTROL,
CRTC_SET_V_TOTAL_MIN_MASK);
- set_reg_field_value(v_total_min,
- 0,
- CRTC_V_TOTAL_MIN,
- CRTC_V_TOTAL_MIN);
- set_reg_field_value(v_total_max,
- 0,
- CRTC_V_TOTAL_MAX,
- CRTC_V_TOTAL_MAX);
set_reg_field_value(v_total_cntl,
0,
CRTC_V_TOTAL_CONTROL,
@@ -447,6 +439,14 @@ void dce110_timing_generator_set_drr(
0,
CRTC_V_TOTAL_CONTROL,
CRTC_V_TOTAL_MAX_SEL);
+ set_reg_field_value(v_total_min,
+ 0,
+ CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN);
+ set_reg_field_value(v_total_max,
+ 0,
+ CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX);
set_reg_field_value(v_total_cntl,
0,
CRTC_V_TOTAL_CONTROL,
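Both timing-generator hunks above reorder the DRR teardown so the V_TOTAL_MIN/MAX select bits are cleared before the limits themselves are zeroed. A trace-style sketch of the fixed write order, with write_field() as a stand-in for set_reg_field_value()/MMIO:

#include <stdio.h>

static void write_field(const char *reg, const char *field, unsigned int val)
{
	printf("%s.%s <- %u\n", reg, field, val);  /* stand-in for an MMIO write */
}

/* disable the V_TOTAL_MIN/MAX *select* bits before zeroing the limits, so
 * the CRTC never runs a frame with the selectors still pointing at
 * zeroed V-total values */
static void drr_disable(void)
{
	write_field("CRTC_V_TOTAL_CONTROL", "V_TOTAL_MIN_SEL", 0);
	write_field("CRTC_V_TOTAL_CONTROL", "V_TOTAL_MAX_SEL", 0);
	write_field("CRTC_V_TOTAL_MIN",     "V_TOTAL_MIN",     0);
	write_field("CRTC_V_TOTAL_MAX",     "V_TOTAL_MAX",     0);
}

int main(void)
{
	drr_disable();
	return 0;
}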
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
index 8ad0481..a3cef60 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
@@ -648,12 +648,6 @@ static void dce110_timing_generator_v_disable_vga(
return;
}
-static bool dce110_tg_v_is_blanked(struct timing_generator *tg)
-{
- /* Signal comes from the primary pipe, underlay is never blanked. */
- return false;
-}
-
/** ********************************************************************************************
*
* DCE11 Timing Generator Constructor / Destructor
@@ -670,7 +664,6 @@ static const struct timing_generator_funcs dce110_tg_v_funcs = {
.set_early_control = dce110_timing_generator_v_set_early_control,
.wait_for_state = dce110_timing_generator_v_wait_for_state,
.set_blank = dce110_timing_generator_v_set_blank,
- .is_blanked = dce110_tg_v_is_blanked,
.set_colors = dce110_timing_generator_v_set_colors,
.set_overscan_blank_color =
dce110_timing_generator_v_set_overscan_color_black,
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index cd1e3f7..00c0a1e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -430,7 +430,7 @@ static struct stream_encoder *dce112_stream_encoder_create(
if (!enc110)
return NULL;
-
+
dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
@@ -867,38 +867,6 @@ enum dc_status dce112_add_stream_to_ctx(
return result;
}
-enum dc_status dce112_validate_guaranteed(
- struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_state *context)
-{
- enum dc_status result = DC_ERROR_UNEXPECTED;
-
- context->streams[0] = stream;
- dc_stream_retain(context->streams[0]);
- context->stream_count++;
-
- result = resource_map_pool_resources(dc, context, stream);
-
- if (result == DC_OK)
- result = resource_map_phy_clock_resources(dc, context, stream);
-
- if (result == DC_OK)
- result = build_mapped_resource(dc, context, stream);
-
- if (result == DC_OK) {
- validate_guaranteed_copy_streams(
- context, dc->caps.max_streams);
- result = resource_build_scaling_params_for_context(dc, context);
- }
-
- if (result == DC_OK)
- if (!dce112_validate_bandwidth(dc, context))
- result = DC_FAIL_BANDWIDTH_VALIDATE;
-
- return result;
-}
-
enum dc_status dce112_validate_global(
struct dc *dc,
struct dc_state *context)
@@ -921,7 +889,6 @@ static void dce112_destroy_resource_pool(struct resource_pool **pool)
static const struct resource_funcs dce112_res_pool_funcs = {
.destroy = dce112_destroy_resource_pool,
.link_enc_create = dce112_link_encoder_create,
- .validate_guaranteed = dce112_validate_guaranteed,
.validate_bandwidth = dce112_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce112_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
index d5c19d3..95a4033 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
@@ -42,11 +42,6 @@ enum dc_status dce112_validate_with_context(
struct dc_state *context,
struct dc_state *old_context);
-enum dc_status dce112_validate_guaranteed(
- struct dc *dc,
- struct dc_stream_state *dc_stream,
- struct dc_state *context);
-
bool dce112_validate_bandwidth(
struct dc *dc,
struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 4659a4b..fda0157 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -652,7 +652,7 @@ static struct mem_input *dce120_mem_input_create(
return NULL;
}
- dce112_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
+ dce120_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
return &dce_mi->base;
}
@@ -684,7 +684,6 @@ static void dce120_destroy_resource_pool(struct resource_pool **pool)
static const struct resource_funcs dce120_res_pool_funcs = {
.destroy = dce120_destroy_resource_pool,
.link_enc_create = dce120_link_encoder_create,
- .validate_guaranteed = dce112_validate_guaranteed,
.validate_bandwidth = dce112_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce112_add_stream_to_ctx
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 7bee781..2ea490f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -570,12 +570,6 @@ void dce120_timing_generator_set_drr(
0x180);
} else {
- CRTC_REG_UPDATE(
- CRTC0_CRTC_V_TOTAL_MIN,
- CRTC_V_TOTAL_MIN, 0);
- CRTC_REG_UPDATE(
- CRTC0_CRTC_V_TOTAL_MAX,
- CRTC_V_TOTAL_MAX, 0);
CRTC_REG_SET_N(CRTC0_CRTC_V_TOTAL_CONTROL, 5,
FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL), 0,
FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL), 0,
@@ -583,6 +577,12 @@ void dce120_timing_generator_set_drr(
FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC), 0,
FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK), 0);
CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN, 0);
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX, 0);
+ CRTC_REG_UPDATE(
CRTC0_CRTC_STATIC_SCREEN_CONTROL,
CRTC_STATIC_SCREEN_EVENT_MASK,
0);
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 5d854a3..48a0689 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -691,23 +691,6 @@ static void destruct(struct dce110_resource_pool *pool)
}
}
-static enum dc_status build_mapped_resource(
- const struct dc *dc,
- struct dc_state *context,
- struct dc_stream_state *stream)
-{
- struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
-
- if (!pipe_ctx)
- return DC_ERROR_UNEXPECTED;
-
- dce110_resource_build_pipe_hw_param(pipe_ctx);
-
- resource_build_info_frame(pipe_ctx);
-
- return DC_OK;
-}
-
bool dce80_validate_bandwidth(
struct dc *dc,
struct dc_state *context)
@@ -749,37 +732,6 @@ enum dc_status dce80_validate_global(
return DC_OK;
}
-enum dc_status dce80_validate_guaranteed(
- struct dc *dc,
- struct dc_stream_state *dc_stream,
- struct dc_state *context)
-{
- enum dc_status result = DC_ERROR_UNEXPECTED;
-
- context->streams[0] = dc_stream;
- dc_stream_retain(context->streams[0]);
- context->stream_count++;
-
- result = resource_map_pool_resources(dc, context, dc_stream);
-
- if (result == DC_OK)
- result = resource_map_clock_resources(dc, context, dc_stream);
-
- if (result == DC_OK)
- result = build_mapped_resource(dc, context, dc_stream);
-
- if (result == DC_OK) {
- validate_guaranteed_copy_streams(
- context, dc->caps.max_streams);
- result = resource_build_scaling_params_for_context(dc, context);
- }
-
- if (result == DC_OK)
- result = dce80_validate_bandwidth(dc, context);
-
- return result;
-}
-
static void dce80_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
@@ -792,7 +744,6 @@ static void dce80_destroy_resource_pool(struct resource_pool **pool)
static const struct resource_funcs dce80_res_pool_funcs = {
.destroy = dce80_destroy_resource_pool,
.link_enc_create = dce80_link_encoder_create,
- .validate_guaranteed = dce80_validate_guaranteed,
.validate_bandwidth = dce80_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce100_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 5469bdf..5c69743 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -26,7 +26,7 @@ DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
dcn10_hubp.o dcn10_mpc.o \
dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
- dcn10_hubbub.o
+ dcn10_hubbub.o dcn10_stream_encoder.o
AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 881a1bf..96d5878 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -367,15 +367,15 @@ bool cm_helper_translate_curve_to_hw_format(
lut_params->hw_points_num = hw_points;
- i = 1;
- for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) {
+ k = 0;
+ for (i = 1; i < MAX_REGIONS_NUMBER; i++) {
if (seg_distr[k] != -1) {
lut_params->arr_curve_points[k].segments_num =
seg_distr[k];
lut_params->arr_curve_points[i].offset =
lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
}
- i++;
+ k++;
}
if (seg_distr[k] != -1)
@@ -529,15 +529,15 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
lut_params->hw_points_num = hw_points;
- i = 1;
- for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) {
+ k = 0;
+ for (i = 1; i < MAX_REGIONS_NUMBER; i++) {
if (seg_distr[k] != -1) {
lut_params->arr_curve_points[k].segments_num =
seg_distr[k];
lut_params->arr_curve_points[i].offset =
lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
}
- i++;
+ k++;
}
if (seg_distr[k] != -1)
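Both loops above build the per-region offsets as a running sum: region i starts where region k = i-1 started, plus 2^seg_distr[k] curve points; the rewrite makes i the loop variable so the bound matches the array being written. A standalone sketch of that accumulation with sample data:

#include <stdio.h>

#define MAX_REGIONS 8

int main(void)
{
	/* -1 marks an unused region, as in the driver */
	int seg_distr[MAX_REGIONS] = { 3, 3, 2, 2, -1, -1, -1, -1 };
	unsigned int offset[MAX_REGIONS] = { 0 };

	/* region i = k+1 starts at region k's offset plus 2^seg_distr[k] */
	for (int i = 1, k = 0; i < MAX_REGIONS; i++, k++) {
		if (seg_distr[k] != -1)
			offset[i] = offset[k] + (1u << seg_distr[k]);
	}

	for (int i = 0; i < MAX_REGIONS; i++)
		printf("region %d starts at point %u\n", i, offset[i]);
	return 0;
}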
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index e305c28..8c4d9e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -98,6 +98,30 @@ enum gamut_remap_select {
GAMUT_REMAP_COMB_COEFF
};
+void dpp_read_state(struct dpp *dpp_base,
+ struct dcn_dpp_state *s)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ REG_GET(CM_IGAM_CONTROL,
+ CM_IGAM_LUT_MODE, &s->igam_lut_mode);
+ REG_GET(CM_IGAM_CONTROL,
+ CM_IGAM_INPUT_FORMAT, &s->igam_input_format);
+ REG_GET(CM_DGAM_CONTROL,
+ CM_DGAM_LUT_MODE, &s->dgam_lut_mode);
+ REG_GET(CM_RGAM_CONTROL,
+ CM_RGAM_LUT_MODE, &s->rgam_lut_mode);
+ REG_GET(CM_GAMUT_REMAP_CONTROL,
+ CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode);
+
+ s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12);
+ s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14);
+ s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22);
+ s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24);
+ s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32);
+ s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34);
+}
+
/* Program gamut remap in bypass mode */
void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)
{
@@ -121,6 +145,13 @@ bool dpp_get_optimal_number_of_taps(
else
pixel_width = scl_data->viewport.width;
+ /* Some ASICs do not support FP16 scaling, so we reject modes that require it */
+ if (scl_data->viewport.width != scl_data->h_active &&
+ scl_data->viewport.height != scl_data->v_active &&
+ dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
+ scl_data->format == PIXEL_FORMAT_FP16)
+ return false;
+
/* TODO: add lb check */
/* No support for programming ratio of 4, drop to 3.99999.. */
@@ -257,7 +288,7 @@ void dpp1_cnv_setup (
struct dpp *dpp_base,
enum surface_pixel_format format,
enum expansion_mode mode,
- struct csc_transform input_csc_color_matrix,
+ struct dc_csc_transform input_csc_color_matrix,
enum dc_color_space input_color_space)
{
uint32_t pixel_format;
@@ -416,7 +447,7 @@ void dpp1_set_cursor_position(
if (src_x_offset >= (int)param->viewport_width)
cur_en = 0; /* not visible beyond right edge*/
- if (src_x_offset + (int)width < 0)
+ if (src_x_offset + (int)width <= 0)
cur_en = 0; /* not visible beyond left edge*/
REG_UPDATE(CURSOR0_CONTROL,
@@ -443,6 +474,7 @@ void dpp1_dppclk_control(
}
static const struct dpp_funcs dcn10_dpp_funcs = {
+ .dpp_read_state = dpp_read_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
.dpp_get_optimal_number_of_taps = dpp_get_optimal_number_of_taps,
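The cursor fix above tightens the left-edge test from '< 0' to '<= 0': pixels span the half-open interval [src_x_offset, src_x_offset + width), so a cursor ending exactly at x == 0 covers nothing and must be disabled. A sketch of the corrected visibility check:

#include <stdbool.h>
#include <stdio.h>

/* the cursor is visible only if [src_x_offset, src_x_offset + width)
 * intersects the viewport [0, vp_width); with the old '< 0' test a
 * cursor ending exactly at x == 0 was wrongly kept enabled */
static bool cursor_visible(int src_x_offset, int width, int vp_width)
{
	if (src_x_offset >= vp_width)
		return false;            /* past the right edge */
	if (src_x_offset + width <= 0)
		return false;            /* past the left edge  */
	return true;
}

int main(void)
{
	printf("%d\n", cursor_visible(-64, 64, 1920));  /* 0: edge case fixed */
	printf("%d\n", cursor_visible(-63, 64, 1920));  /* 1: one column shows */
	return 0;
}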
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index 17b062a..5944a3b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -44,6 +44,10 @@
#define TF_REG_LIST_DCN(id) \
SRI(CM_GAMUT_REMAP_CONTROL, CM, id),\
SRI(CM_GAMUT_REMAP_C11_C12, CM, id),\
+ SRI(CM_GAMUT_REMAP_C13_C14, CM, id),\
+ SRI(CM_GAMUT_REMAP_C21_C22, CM, id),\
+ SRI(CM_GAMUT_REMAP_C23_C24, CM, id),\
+ SRI(CM_GAMUT_REMAP_C31_C32, CM, id),\
SRI(CM_GAMUT_REMAP_C33_C34, CM, id),\
SRI(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \
SRI(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \
@@ -108,6 +112,8 @@
SRI(CM_DGAM_LUT_DATA, CM, id), \
SRI(CM_CONTROL, CM, id), \
SRI(CM_DGAM_CONTROL, CM, id), \
+ SRI(CM_TEST_DEBUG_INDEX, CM, id), \
+ SRI(CM_TEST_DEBUG_DATA, CM, id), \
SRI(FORMAT_CONTROL, CNVC_CFG, id), \
SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
@@ -175,6 +181,14 @@
TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE, mask_sh),\
TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C11, mask_sh),\
TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C12, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C13, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C14, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C21, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C22, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C23, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C24, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C31, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C32, mask_sh),\
TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C33, mask_sh),\
TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C34, mask_sh),\
TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh),\
@@ -300,6 +314,7 @@
TF_SF(CM0_CM_DGAM_LUT_INDEX, CM_DGAM_LUT_INDEX, mask_sh), \
TF_SF(CM0_CM_DGAM_LUT_DATA, CM_DGAM_LUT_DATA, mask_sh), \
TF_SF(CM0_CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, mask_sh), \
+ TF_SF(CM0_CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_INDEX, mask_sh), \
TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \
TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \
TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \
@@ -417,6 +432,41 @@
TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
TF_SF(DPP_TOP0_DPP_CONTROL, DPPCLK_RATE_CONTROL, mask_sh)
+/*
+ *
+ DCN1 CM debug status register definition
+
+ register :ID9_CM_STATUS do
+ implement_ref :cm
+ map to: :cmdebugind, at: j
+ width 32
+ disclosure NEVER
+
+ field :ID9_VUPDATE_CFG, [0], R
+ field :ID9_IGAM_LUT_MODE, [2..1], R
+ field :ID9_BNS_BYPASS, [3], R
+ field :ID9_ICSC_MODE, [5..4], R
+ field :ID9_DGAM_LUT_MODE, [8..6], R
+ field :ID9_HDR_BYPASS, [9], R
+ field :ID9_GAMUT_REMAP_MODE, [11..10], R
+ field :ID9_RGAM_LUT_MODE, [14..12], R
+ #1 free bit
+ field :ID9_OCSC_MODE, [18..16], R
+ field :ID9_DENORM_MODE, [21..19], R
+ field :ID9_ROUND_TRUNC_MODE, [25..22], R
+ field :ID9_DITHER_EN, [26], R
+ field :ID9_DITHER_MODE, [28..27], R
+ end
+*/
+
+#define TF_DEBUG_REG_LIST_SH_DCN10 \
+ .CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 4, \
+ .CM_TEST_DEBUG_DATA_ID9_OCSC_MODE = 16
+
+#define TF_DEBUG_REG_LIST_MASK_DCN10 \
+ .CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 0x30, \
+ .CM_TEST_DEBUG_DATA_ID9_OCSC_MODE = 0x70000
+
#define TF_REG_FIELD_LIST(type) \
type EXT_OVERSCAN_LEFT; \
type EXT_OVERSCAN_RIGHT; \
@@ -486,6 +536,14 @@
type CM_GAMUT_REMAP_MODE; \
type CM_GAMUT_REMAP_C11; \
type CM_GAMUT_REMAP_C12; \
+ type CM_GAMUT_REMAP_C13; \
+ type CM_GAMUT_REMAP_C14; \
+ type CM_GAMUT_REMAP_C21; \
+ type CM_GAMUT_REMAP_C22; \
+ type CM_GAMUT_REMAP_C23; \
+ type CM_GAMUT_REMAP_C24; \
+ type CM_GAMUT_REMAP_C31; \
+ type CM_GAMUT_REMAP_C32; \
type CM_GAMUT_REMAP_C33; \
type CM_GAMUT_REMAP_C34; \
type CM_COMA_C11; \
@@ -1010,6 +1068,9 @@
type CUR0_EXPANSION_MODE; \
type CUR0_ENABLE; \
type CM_BYPASS; \
+ type CM_TEST_DEBUG_INDEX; \
+ type CM_TEST_DEBUG_DATA_ID9_ICSC_MODE; \
+ type CM_TEST_DEBUG_DATA_ID9_OCSC_MODE;\
type FORMAT_CONTROL__ALPHA_EN; \
type CUR0_COLOR0; \
type CUR0_COLOR1; \
@@ -1054,6 +1115,10 @@ struct dcn_dpp_mask {
uint32_t RECOUT_SIZE; \
uint32_t CM_GAMUT_REMAP_CONTROL; \
uint32_t CM_GAMUT_REMAP_C11_C12; \
+ uint32_t CM_GAMUT_REMAP_C13_C14; \
+ uint32_t CM_GAMUT_REMAP_C21_C22; \
+ uint32_t CM_GAMUT_REMAP_C23_C24; \
+ uint32_t CM_GAMUT_REMAP_C31_C32; \
uint32_t CM_GAMUT_REMAP_C33_C34; \
uint32_t CM_COMA_C11_C12; \
uint32_t CM_COMA_C33_C34; \
@@ -1255,6 +1320,8 @@ struct dcn_dpp_mask {
uint32_t CM_IGAM_LUT_RW_CONTROL; \
uint32_t CM_IGAM_LUT_RW_INDEX; \
uint32_t CM_IGAM_LUT_SEQ_COLOR; \
+ uint32_t CM_TEST_DEBUG_INDEX; \
+ uint32_t CM_TEST_DEBUG_DATA; \
uint32_t FORMAT_CONTROL; \
uint32_t CNVC_SURFACE_PIXEL_FORMAT; \
uint32_t CURSOR_CONTROL; \
@@ -1289,8 +1356,8 @@ struct dcn10_dpp {
enum dcn10_input_csc_select {
INPUT_CSC_SELECT_BYPASS = 0,
- INPUT_CSC_SELECT_ICSC,
- INPUT_CSC_SELECT_COMA
+ INPUT_CSC_SELECT_ICSC = 1,
+ INPUT_CSC_SELECT_COMA = 2
};
void dpp1_set_cursor_attributes(
@@ -1364,6 +1431,9 @@ bool dpp_get_optimal_number_of_taps(
struct scaler_data *scl_data,
const struct scaling_taps *in_taps);
+void dpp_read_state(struct dpp *dpp_base,
+ struct dcn_dpp_state *s);
+
void dpp_reset(struct dpp *dpp_base);
void dpp1_cm_program_regamma_lut(
@@ -1408,7 +1478,7 @@ void dpp1_cnv_setup (
struct dpp *dpp_base,
enum surface_pixel_format format,
enum expansion_mode mode,
- struct csc_transform input_csc_color_matrix,
+ struct dc_csc_transform input_csc_color_matrix,
enum dc_color_space input_color_space);
void dpp1_full_bypass(struct dpp *dpp_base);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index fb32975e..4f373c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -216,41 +216,55 @@ static void dpp1_cm_program_color_matrix(
struct dcn10_dpp *dpp,
const uint16_t *regval)
{
- uint32_t mode;
+ uint32_t ocsc_mode;
+ uint32_t cur_mode;
struct color_matrices_reg gam_regs;
- REG_GET(CM_OCSC_CONTROL, CM_OCSC_MODE, &mode);
-
if (regval == NULL) {
BREAK_TO_DEBUGGER();
return;
}
- mode = 4;
+
+ /* determine which CSC matrix (ocsc or comb) is currently in
+ * use, then select the alternate set to double buffer the
+ * CSC update so it takes effect on a frame boundary
+ */
+ REG_SET(CM_TEST_DEBUG_INDEX, 0,
+ CM_TEST_DEBUG_INDEX, 9);
+
+ REG_GET(CM_TEST_DEBUG_DATA,
+ CM_TEST_DEBUG_DATA_ID9_OCSC_MODE, &cur_mode);
+
+ if (cur_mode != 4)
+ ocsc_mode = 4;
+ else
+ ocsc_mode = 5;
+
+
gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11;
gam_regs.masks.csc_c11 = dpp->tf_mask->CM_OCSC_C11;
gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12;
gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12;
- if (mode == 4) {
+ if (ocsc_mode == 4) {
gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12);
gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34);
- cm_helper_program_color_matrices(
- dpp->base.ctx,
- regval,
- &gam_regs);
-
} else {
gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
- cm_helper_program_color_matrices(
- dpp->base.ctx,
- regval,
- &gam_regs);
}
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
+
}
void dpp1_cm_set_output_csc_default(
@@ -260,15 +274,14 @@ void dpp1_cm_set_output_csc_default(
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
const uint16_t *regval = NULL;
int arr_size;
- uint32_t ocsc_mode = 4;
regval = find_color_matrix(colorspace, &arr_size);
if (regval == NULL) {
BREAK_TO_DEBUGGER();
return;
}
+
dpp1_cm_program_color_matrix(dpp, regval);
- REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
}
static void dpp1_cm_get_reg_field(
@@ -329,9 +342,8 @@ void dpp1_cm_set_output_csc_adjustment(
const uint16_t *regval)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
- uint32_t ocsc_mode = 4;
+
dpp1_cm_program_color_matrix(dpp, regval);
- REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
}
void dpp1_cm_power_on_regamma_lut(struct dpp *dpp_base,
@@ -437,17 +449,18 @@ void dpp1_cm_program_regamma_lutb_settings(
void dpp1_program_input_csc(
struct dpp *dpp_base,
enum dc_color_space color_space,
- enum dcn10_input_csc_select select,
+ enum dcn10_input_csc_select input_select,
const struct out_csc_color_matrix *tbl_entry)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
int i;
int arr_size = sizeof(dcn10_input_csc_matrix)/sizeof(struct dcn10_input_csc_matrix);
const uint16_t *regval = NULL;
- uint32_t selection = 1;
+ uint32_t cur_select = 0;
+ enum dcn10_input_csc_select select;
struct color_matrices_reg gam_regs;
- if (select == INPUT_CSC_SELECT_BYPASS) {
+ if (input_select == INPUT_CSC_SELECT_BYPASS) {
REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0);
return;
}
@@ -467,36 +480,45 @@ void dpp1_program_input_csc(
regval = tbl_entry->regval;
}
- if (select == INPUT_CSC_SELECT_COMA)
- selection = 2;
- REG_SET(CM_ICSC_CONTROL, 0,
- CM_ICSC_MODE, selection);
+ /* determine which CSC matrix (icsc or coma) is currently in
+ * use, then select the alternate set to double buffer the
+ * CSC update so it takes effect on a frame boundary
+ */
+ REG_SET(CM_TEST_DEBUG_INDEX, 0,
+ CM_TEST_DEBUG_INDEX, 9);
+
+ REG_GET(CM_TEST_DEBUG_DATA,
+ CM_TEST_DEBUG_DATA_ID9_ICSC_MODE, &cur_select);
+
+ if (cur_select != INPUT_CSC_SELECT_ICSC)
+ select = INPUT_CSC_SELECT_ICSC;
+ else
+ select = INPUT_CSC_SELECT_COMA;
gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11;
gam_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11;
gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12;
gam_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12;
-
if (select == INPUT_CSC_SELECT_ICSC) {
gam_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12);
gam_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34);
- cm_helper_program_color_matrices(
- dpp->base.ctx,
- regval,
- &gam_regs);
} else {
gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
- cm_helper_program_color_matrices(
- dpp->base.ctx,
- regval,
- &gam_regs);
}
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ REG_SET(CM_ICSC_CONTROL, 0,
+ CM_ICSC_MODE, select);
}
//keep here for now, decide multi dce support later
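Both CSC paths above now double-buffer coefficient updates: read the currently active matrix set via CM_TEST_DEBUG_INDEX/DATA, program the inactive set, and only then flip the mode so hardware latches the new coefficients on a frame boundary. A toy model of that sequence (fake register file; mode values 4/5 mirror the ocsc/comb selects above, and 0x2000 is used as a plausible fixed-point 1.0, not the driver's actual encoding):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { OCSC_SET_A = 4, OCSC_SET_B = 5 };   /* CM_OCSC_MODE values, per above */

struct csc_hw {
	uint32_t mode;              /* what CM_TEST_DEBUG_DATA would report */
	uint16_t coeff[2][12];      /* two 3x4 coefficient banks */
};

static void program_output_csc(struct csc_hw *hw, const uint16_t regval[12])
{
	/* 1. query which bank is live, 2. fill the *inactive* bank, 3. flip
	 * the mode last so hardware latches the new bank on a frame
	 * boundary instead of mid-scanout */
	uint32_t next = (hw->mode == OCSC_SET_A) ? OCSC_SET_B : OCSC_SET_A;

	memcpy(hw->coeff[next - OCSC_SET_A], regval, 12 * sizeof(uint16_t));
	hw->mode = next;
}

int main(void)
{
	struct csc_hw hw = { .mode = OCSC_SET_A };
	uint16_t identity[12] = { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0 };

	program_output_csc(&hw, identity);
	printf("active bank mode: %u\n", hw.mode);   /* 5: switched to set B */
	return 0;
}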
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 738f67f..b9fb14a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -476,8 +476,227 @@ void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub)
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
}
+static bool hubbub1_dcc_support_swizzle(
+ enum swizzle_mode_values swizzle,
+ unsigned int bytes_per_element,
+ enum segment_order *segment_order_horz,
+ enum segment_order *segment_order_vert)
+{
+ bool standard_swizzle = false;
+ bool display_swizzle = false;
+
+ switch (swizzle) {
+ case DC_SW_4KB_S:
+ case DC_SW_64KB_S:
+ case DC_SW_VAR_S:
+ case DC_SW_4KB_S_X:
+ case DC_SW_64KB_S_X:
+ case DC_SW_VAR_S_X:
+ standard_swizzle = true;
+ break;
+ case DC_SW_4KB_D:
+ case DC_SW_64KB_D:
+ case DC_SW_VAR_D:
+ case DC_SW_4KB_D_X:
+ case DC_SW_64KB_D_X:
+ case DC_SW_VAR_D_X:
+ display_swizzle = true;
+ break;
+ default:
+ break;
+ }
+
+ if (bytes_per_element == 1 && standard_swizzle) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__na;
+ return true;
+ }
+ if (bytes_per_element == 2 && standard_swizzle) {
+ *segment_order_horz = segment_order__non_contiguous;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 4 && standard_swizzle) {
+ *segment_order_horz = segment_order__non_contiguous;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 8 && standard_swizzle) {
+ *segment_order_horz = segment_order__na;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 8 && display_swizzle) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__non_contiguous;
+ return true;
+ }
+
+ return false;
+}
+
+static bool hubbub1_dcc_support_pixel_format(
+ enum surface_pixel_format format,
+ unsigned int *bytes_per_element)
+{
+ /* DML: get_bytes_per_element */
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ *bytes_per_element = 2;
+ return true;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ *bytes_per_element = 4;
+ return true;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ *bytes_per_element = 8;
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void hubbub1_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
+ unsigned int bytes_per_element)
+{
+ /* copied from DML; might want to refactor to leverage DML directly */
+ /* DML : get_blk256_size */
+ if (bytes_per_element == 1) {
+ *blk256_width = 16;
+ *blk256_height = 16;
+ } else if (bytes_per_element == 2) {
+ *blk256_width = 16;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 4) {
+ *blk256_width = 8;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 8) {
+ *blk256_width = 8;
+ *blk256_height = 4;
+ }
+}
+
+static void hubbub1_det_request_size(
+ unsigned int height,
+ unsigned int width,
+ unsigned int bpe,
+ bool *req128_horz_wc,
+ bool *req128_vert_wc)
+{
+ unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
+
+ unsigned int blk256_height = 0;
+ unsigned int blk256_width = 0;
+ unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
+
+ hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);
+
+ swath_bytes_horz_wc = height * blk256_height * bpe;
+ swath_bytes_vert_wc = width * blk256_width * bpe;
+
+ *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
+ false : /* full 256B request */
+ true; /* half 128B request */
+
+ *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
+ false : /* full 256B request */
+ true; /* half 128B request */
+}
+
+static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output)
+{
+ struct dc *dc = hubbub->ctx->dc;
+ /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
+ enum dcc_control dcc_control;
+ unsigned int bpe;
+ enum segment_order segment_order_horz, segment_order_vert;
+ bool req128_horz_wc, req128_vert_wc;
+
+ memset(output, 0, sizeof(*output));
+
+ if (dc->debug.disable_dcc == DCC_DISABLE)
+ return false;
+
+ if (!hubbub->funcs->dcc_support_pixel_format(input->format, &bpe))
+ return false;
+
+ if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
+ &segment_order_horz, &segment_order_vert))
+ return false;
+
+ hubbub1_det_request_size(input->surface_size.height, input->surface_size.width,
+ bpe, &req128_horz_wc, &req128_vert_wc);
+
+ if (!req128_horz_wc && !req128_vert_wc) {
+ dcc_control = dcc_control__256_256_xxx;
+ } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
+ if (!req128_horz_wc)
+ dcc_control = dcc_control__256_256_xxx;
+ else if (segment_order_horz == segment_order__contiguous)
+ dcc_control = dcc_control__128_128_xxx;
+ else
+ dcc_control = dcc_control__256_64_64;
+ } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
+ if (!req128_vert_wc)
+ dcc_control = dcc_control__256_256_xxx;
+ else if (segment_order_vert == segment_order__contiguous)
+ dcc_control = dcc_control__128_128_xxx;
+ else
+ dcc_control = dcc_control__256_64_64;
+ } else {
+ if ((req128_horz_wc &&
+ segment_order_horz == segment_order__non_contiguous) ||
+ (req128_vert_wc &&
+ segment_order_vert == segment_order__non_contiguous))
+ /* access_dir not known, must use most constraining */
+ dcc_control = dcc_control__256_64_64;
+ else
+ /* req128 is true for either horz or vert,
+ * but segment_order is contiguous
+ */
+ dcc_control = dcc_control__128_128_xxx;
+ }
+
+ if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
+ dcc_control != dcc_control__256_256_xxx)
+ return false;
+
+ switch (dcc_control) {
+ case dcc_control__256_256_xxx:
+ output->grph.rgb.max_uncompressed_blk_size = 256;
+ output->grph.rgb.max_compressed_blk_size = 256;
+ output->grph.rgb.independent_64b_blks = false;
+ break;
+ case dcc_control__128_128_xxx:
+ output->grph.rgb.max_uncompressed_blk_size = 128;
+ output->grph.rgb.max_compressed_blk_size = 128;
+ output->grph.rgb.independent_64b_blks = false;
+ break;
+ case dcc_control__256_64_64:
+ output->grph.rgb.max_uncompressed_blk_size = 256;
+ output->grph.rgb.max_compressed_blk_size = 64;
+ output->grph.rgb.independent_64b_blks = true;
+ break;
+ }
+
+ output->capable = true;
+ output->const_color_support = false;
+
+ return true;
+}
+
static const struct hubbub_funcs hubbub1_funcs = {
- .update_dchub = hubbub1_update_dchub
+ .update_dchub = hubbub1_update_dchub,
+ .dcc_support_swizzle = hubbub1_dcc_support_swizzle,
+ .dcc_support_pixel_format = hubbub1_dcc_support_pixel_format,
+ .get_dcc_compression_cap = hubbub1_get_dcc_compression_cap,
};
void hubbub1_construct(struct hubbub *hubbub,
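hubbub1_det_request_size above decides between full 256-byte and half 128-byte requests by whether two swaths fit in the 164KB detile buffer. A sketch of that test with an illustrative surface (the numbers are examples only):

#include <stdbool.h>
#include <stdio.h>

#define DETILE_BUF_SIZE (164 * 1024)  /* 164KB on DCN1.0, per the code above */

/* a full 256B request needs two swaths resident in the detile buffer, so
 * if 2 * swath_bytes overflows the buffer, fall back to 128B requests */
static bool needs_req128(unsigned int swath_bytes)
{
	return 2u * swath_bytes > DETILE_BUF_SIZE;
}

int main(void)
{
	/* e.g. 4 bytes per element, blk256 height 8, surface height 1080:
	 * horizontal write-combine swath = 1080 * 8 * 4 bytes */
	unsigned int swath_bytes_horz_wc = 1080 * 8 * 4;

	printf("req128_horz_wc = %d\n", needs_req128(swath_bytes_horz_wc));
	return 0;
}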
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index a16e908..f479f54 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -27,6 +27,7 @@
#define __DC_HUBBUB_DCN10_H__
#include "core_types.h"
+#include "dchubbub.h"
#define HUBHUB_REG_LIST_DCN()\
SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
@@ -173,12 +174,6 @@ struct dcn_hubbub_wm {
struct dcn_hubbub_wm_set sets[4];
};
-struct hubbub_funcs {
- void (*update_dchub)(
- struct hubbub *hubbub,
- struct dchub_init_data *dh_data);
-};
-
struct hubbub {
const struct hubbub_funcs *funcs;
struct dc_context *ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 39b72f6..0cbc83e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -146,6 +146,9 @@ void hubp1_program_size_and_rotation(
* 444 or 420 luma
*/
if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ ASSERT(plane_size->video.chroma_pitch != 0);
+ /* Chroma pitch zero can cause system hang! */
+
pitch = plane_size->video.luma_pitch - 1;
meta_pitch = dcc->video.meta_pitch_l - 1;
pitch_c = plane_size->video.chroma_pitch - 1;
@@ -535,11 +538,13 @@ void hubp1_program_deadline(
REG_SET(VBLANK_PARAMETERS_3, 0,
REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
- REG_SET(NOM_PARAMETERS_0, 0,
- DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l);
+ if (REG(NOM_PARAMETERS_0))
+ REG_SET(NOM_PARAMETERS_0, 0,
+ DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l);
- REG_SET(NOM_PARAMETERS_1, 0,
- REFCYC_PER_PTE_GROUP_NOM_L, dlg_attr->refcyc_per_pte_group_nom_l);
+ if (REG(NOM_PARAMETERS_1))
+ REG_SET(NOM_PARAMETERS_1, 0,
+ REFCYC_PER_PTE_GROUP_NOM_L, dlg_attr->refcyc_per_pte_group_nom_l);
REG_SET(NOM_PARAMETERS_4, 0,
DST_Y_PER_META_ROW_NOM_L, dlg_attr->dst_y_per_meta_row_nom_l);
@@ -568,11 +573,13 @@ void hubp1_program_deadline(
REG_SET(VBLANK_PARAMETERS_4, 0,
REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
- REG_SET(NOM_PARAMETERS_2, 0,
- DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c);
+ if (REG(NOM_PARAMETERS_2))
+ REG_SET(NOM_PARAMETERS_2, 0,
+ DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c);
- REG_SET(NOM_PARAMETERS_3, 0,
- REFCYC_PER_PTE_GROUP_NOM_C, dlg_attr->refcyc_per_pte_group_nom_c);
+ if (REG(NOM_PARAMETERS_3))
+ REG_SET(NOM_PARAMETERS_3, 0,
+ REFCYC_PER_PTE_GROUP_NOM_C, dlg_attr->refcyc_per_pte_group_nom_c);
REG_SET(NOM_PARAMETERS_6, 0,
DST_Y_PER_META_ROW_NOM_C, dlg_attr->dst_y_per_meta_row_nom_c);
@@ -609,6 +616,13 @@ void hubp1_program_deadline(
REG_SET(DCN_SURF1_TTU_CNTL1, 0,
REFCYC_PER_REQ_DELIVERY_PRE,
ttu_attr->refcyc_per_req_delivery_pre_c);
+
+ REG_SET_3(DCN_CUR0_TTU_CNTL0, 0,
+ REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_cur0,
+ QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_cur0,
+ QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_cur0);
+ REG_SET(DCN_CUR0_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
}
static void hubp1_setup(
@@ -752,9 +766,159 @@ void min_set_viewport(
PRI_VIEWPORT_Y_START_C, viewport_c->y);
}
-void hubp1_read_state(struct dcn10_hubp *hubp1,
- struct dcn_hubp_state *s)
+void hubp1_read_state(struct hubp *hubp)
{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ struct dcn_hubp_state *s = &hubp1->state;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr = &s->dlg_attr;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr = &s->ttu_attr;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+
+ /* Requester */
+ REG_GET(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs->plane1_base_address);
+ REG_GET_4(DCN_EXPANSION_MODE,
+ DRQ_EXPANSION_MODE, &rq_regs->drq_expansion_mode,
+ PRQ_EXPANSION_MODE, &rq_regs->prq_expansion_mode,
+ MRQ_EXPANSION_MODE, &rq_regs->mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, &rq_regs->crq_expansion_mode);
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
+ CHUNK_SIZE, &rq_regs->rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, &rq_regs->rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, &rq_regs->rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, &rq_regs->rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, &rq_regs->rq_regs_l.dpte_group_size,
+ MPTE_GROUP_SIZE, &rq_regs->rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, &rq_regs->rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, &rq_regs->rq_regs_l.pte_row_height_linear);
+ REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C,
+ CHUNK_SIZE_C, &rq_regs->rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.dpte_group_size,
+ MPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.mpte_group_size,
+ SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear);
+
+ /* DLG - Per hubp */
+ REG_GET_2(BLANK_OFFSET_0,
+ REFCYC_H_BLANK_END, &dlg_attr->refcyc_h_blank_end,
+ DLG_V_BLANK_END, &dlg_attr->dlg_vblank_end);
+
+ REG_GET(BLANK_OFFSET_1,
+ MIN_DST_Y_NEXT_START, &dlg_attr->min_dst_y_next_start);
+
+ REG_GET(DST_DIMENSIONS,
+ REFCYC_PER_HTOTAL, &dlg_attr->refcyc_per_htotal);
+
+ REG_GET_2(DST_AFTER_SCALER,
+ REFCYC_X_AFTER_SCALER, &dlg_attr->refcyc_x_after_scaler,
+ DST_Y_AFTER_SCALER, &dlg_attr->dst_y_after_scaler);
+
+ if (REG(PREFETCH_SETTINS))
+ REG_GET_2(PREFETCH_SETTINS,
+ DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
+ VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
+ else
+ REG_GET_2(PREFETCH_SETTINGS,
+ DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
+ VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
+
+ REG_GET_2(VBLANK_PARAMETERS_0,
+ DST_Y_PER_VM_VBLANK, &dlg_attr->dst_y_per_vm_vblank,
+ DST_Y_PER_ROW_VBLANK, &dlg_attr->dst_y_per_row_vblank);
+
+ REG_GET(REF_FREQ_TO_PIX_FREQ,
+ REF_FREQ_TO_PIX_FREQ, &dlg_attr->ref_freq_to_pix_freq);
+
+ /* DLG - Per luma/chroma */
+ REG_GET(VBLANK_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr->refcyc_per_pte_group_vblank_l);
+
+ REG_GET(VBLANK_PARAMETERS_3,
+ REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr->refcyc_per_meta_chunk_vblank_l);
+
+ if (REG(NOM_PARAMETERS_0))
+ REG_GET(NOM_PARAMETERS_0,
+ DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr->dst_y_per_pte_row_nom_l);
+
+ if (REG(NOM_PARAMETERS_1))
+ REG_GET(NOM_PARAMETERS_1,
+ REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr->refcyc_per_pte_group_nom_l);
+
+ REG_GET(NOM_PARAMETERS_4,
+ DST_Y_PER_META_ROW_NOM_L, &dlg_attr->dst_y_per_meta_row_nom_l);
+
+ REG_GET(NOM_PARAMETERS_5,
+ REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr->refcyc_per_meta_chunk_nom_l);
+
+ REG_GET_2(PER_LINE_DELIVERY_PRE,
+ REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr->refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr->refcyc_per_line_delivery_pre_c);
+
+ REG_GET_2(PER_LINE_DELIVERY,
+ REFCYC_PER_LINE_DELIVERY_L, &dlg_attr->refcyc_per_line_delivery_l,
+ REFCYC_PER_LINE_DELIVERY_C, &dlg_attr->refcyc_per_line_delivery_c);
+
+ if (REG(PREFETCH_SETTINS_C))
+ REG_GET(PREFETCH_SETTINS_C,
+ VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
+ else
+ REG_GET(PREFETCH_SETTINGS_C,
+ VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
+
+ REG_GET(VBLANK_PARAMETERS_2,
+ REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr->refcyc_per_pte_group_vblank_c);
+
+ REG_GET(VBLANK_PARAMETERS_4,
+ REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr->refcyc_per_meta_chunk_vblank_c);
+
+ if (REG(NOM_PARAMETERS_2))
+ REG_GET(NOM_PARAMETERS_2,
+ DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr->dst_y_per_pte_row_nom_c);
+
+ if (REG(NOM_PARAMETERS_3))
+ REG_GET(NOM_PARAMETERS_3,
+ REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr->refcyc_per_pte_group_nom_c);
+
+ REG_GET(NOM_PARAMETERS_6,
+ DST_Y_PER_META_ROW_NOM_C, &dlg_attr->dst_y_per_meta_row_nom_c);
+
+ REG_GET(NOM_PARAMETERS_7,
+ REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr->refcyc_per_meta_chunk_nom_c);
+
+ /* TTU - per hubp */
+ REG_GET_2(DCN_TTU_QOS_WM,
+ QoS_LEVEL_LOW_WM, &ttu_attr->qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, &ttu_attr->qos_level_high_wm);
+
+ REG_GET_2(DCN_GLOBAL_TTU_CNTL,
+ MIN_TTU_VBLANK, &ttu_attr->min_ttu_vblank,
+ QoS_LEVEL_FLIP, &ttu_attr->qos_level_flip);
+
+ /* TTU - per luma/chroma */
+ /* Assume surf0 is luma and surf1 is chroma */
+
+ REG_GET_3(DCN_SURF0_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_l,
+ QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_l,
+ QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_l);
+
+ REG_GET(DCN_SURF0_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ &ttu_attr->refcyc_per_req_delivery_pre_l);
+
+ REG_GET_3(DCN_SURF1_TTU_CNTL0,
+ REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_c,
+ QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_c,
+ QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_c);
+
+ REG_GET(DCN_SURF1_TTU_CNTL1,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ &ttu_attr->refcyc_per_req_delivery_pre_c);
+
+ /* Rest of hubp */
REG_GET(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, &s->pixel_format);
@@ -897,7 +1061,7 @@ void hubp1_cursor_set_position(
if (src_x_offset >= (int)param->viewport_width)
cur_en = 0; /* not visible beyond right edge*/
- if (src_x_offset + (int)hubp->curs_attr.width < 0)
+ if (src_x_offset + (int)hubp->curs_attr.width <= 0)
cur_en = 0; /* not visible beyond left edge*/
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
@@ -952,6 +1116,7 @@ static struct hubp_funcs dcn10_hubp_funcs = {
.hubp_disconnect = hubp1_disconnect,
.hubp_clk_cntl = hubp1_clk_cntl,
.hubp_vtg_sel = hubp1_vtg_sel,
+ .hubp_read_state = hubp1_read_state,
};
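
The new hubp_read_state hook latches the hardware registers into the dcn10_hubp wrapper's state cache, so debug code can snapshot once and format afterwards without further register reads. A minimal sketch of a consumer, assuming a dc_context is passed in because the DTN_INFO macro expects a local dc_ctx (dump_one_hubp is a hypothetical helper, not part of this patch):

static void dump_one_hubp(struct dc_context *dc_ctx, struct hubp *hubp)
{
	struct dcn_hubp_state *s;

	/* latch current register values into the wrapper's cache */
	hubp->funcs->hubp_read_state(hubp);

	/* then format from the snapshot, with no further register I/O */
	s = &TO_DCN10_HUBP(hubp)->state;
	DTN_INFO("HUBP[%d]: blank_en=%d dcc_en=%d underflow=%xh\n",
			hubp->inst, s->blank_en, s->dcc_en, s->underflow_status);
}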
/*****************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 4a3703e..fe9b8c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -30,6 +30,7 @@
#define TO_DCN10_HUBP(hubp)\
container_of(hubp, struct dcn10_hubp, base)
+/* Register address initialization macro for all ASICs (including those with reduced functionality) */
#define HUBP_REG_LIST_DCN(id)\
SRI(DCHUBP_CNTL, HUBP, id),\
SRI(HUBPREQ_DEBUG_DB, HUBP, id),\
@@ -78,16 +79,12 @@
SRI(REF_FREQ_TO_PIX_FREQ, HUBPREQ, id),\
SRI(VBLANK_PARAMETERS_1, HUBPREQ, id),\
SRI(VBLANK_PARAMETERS_3, HUBPREQ, id),\
- SRI(NOM_PARAMETERS_0, HUBPREQ, id),\
- SRI(NOM_PARAMETERS_1, HUBPREQ, id),\
SRI(NOM_PARAMETERS_4, HUBPREQ, id),\
SRI(NOM_PARAMETERS_5, HUBPREQ, id),\
SRI(PER_LINE_DELIVERY_PRE, HUBPREQ, id),\
SRI(PER_LINE_DELIVERY, HUBPREQ, id),\
SRI(VBLANK_PARAMETERS_2, HUBPREQ, id),\
SRI(VBLANK_PARAMETERS_4, HUBPREQ, id),\
- SRI(NOM_PARAMETERS_2, HUBPREQ, id),\
- SRI(NOM_PARAMETERS_3, HUBPREQ, id),\
SRI(NOM_PARAMETERS_6, HUBPREQ, id),\
SRI(NOM_PARAMETERS_7, HUBPREQ, id),\
SRI(DCN_TTU_QOS_WM, HUBPREQ, id),\
@@ -96,11 +93,21 @@
SRI(DCN_SURF0_TTU_CNTL1, HUBPREQ, id),\
SRI(DCN_SURF1_TTU_CNTL0, HUBPREQ, id),\
SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\
- SRI(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id),\
+ SRI(DCN_CUR0_TTU_CNTL0, HUBPREQ, id),\
+ SRI(DCN_CUR0_TTU_CNTL1, HUBPREQ, id),\
SRI(HUBP_CLK_CNTL, HUBP, id)
+/* Register address initialization macro for ASICs with VM */
+#define HUBP_REG_LIST_DCN_VM(id)\
+ SRI(NOM_PARAMETERS_0, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_1, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_2, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_3, HUBPREQ, id),\
+ SRI(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id)
+
#define HUBP_REG_LIST_DCN10(id)\
HUBP_REG_LIST_DCN(id),\
+ HUBP_REG_LIST_DCN_VM(id),\
SRI(PREFETCH_SETTINS, HUBPREQ, id),\
SRI(PREFETCH_SETTINS_C, HUBPREQ, id),\
SRI(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, HUBPREQ, id),\
@@ -198,6 +205,8 @@
uint32_t DCN_SURF0_TTU_CNTL1; \
uint32_t DCN_SURF1_TTU_CNTL0; \
uint32_t DCN_SURF1_TTU_CNTL1; \
+ uint32_t DCN_CUR0_TTU_CNTL0; \
+ uint32_t DCN_CUR0_TTU_CNTL1; \
uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB; \
uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB; \
uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB; \
@@ -237,6 +246,7 @@
#define HUBP_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
+/* Mask/shift struct generation macro for all ASICs (including those with reduced functionality) */
#define HUBP_MASK_SH_LIST_DCN(mask_sh)\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
@@ -335,8 +345,6 @@
HUBP_SF(HUBPREQ0_REF_FREQ_TO_PIX_FREQ, REF_FREQ_TO_PIX_FREQ, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_1, REFCYC_PER_PTE_GROUP_VBLANK_L, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_3, REFCYC_PER_META_CHUNK_VBLANK_L, mask_sh),\
- HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\
- HUBP_SF(HUBPREQ0_NOM_PARAMETERS_1, REFCYC_PER_PTE_GROUP_NOM_L, mask_sh),\
HUBP_SF(HUBPREQ0_NOM_PARAMETERS_4, DST_Y_PER_META_ROW_NOM_L, mask_sh),\
HUBP_SF(HUBPREQ0_NOM_PARAMETERS_5, REFCYC_PER_META_CHUNK_NOM_L, mask_sh),\
HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_L, mask_sh),\
@@ -345,8 +353,6 @@
HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_C, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_2, REFCYC_PER_PTE_GROUP_VBLANK_C, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_4, REFCYC_PER_META_CHUNK_VBLANK_C, mask_sh),\
- HUBP_SF(HUBPREQ0_NOM_PARAMETERS_2, DST_Y_PER_PTE_ROW_NOM_C, mask_sh),\
- HUBP_SF(HUBPREQ0_NOM_PARAMETERS_3, REFCYC_PER_PTE_GROUP_NOM_C, mask_sh),\
HUBP_SF(HUBPREQ0_NOM_PARAMETERS_6, DST_Y_PER_META_ROW_NOM_C, mask_sh),\
HUBP_SF(HUBPREQ0_NOM_PARAMETERS_7, REFCYC_PER_META_CHUNK_NOM_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_LOW_WM, mask_sh),\
@@ -357,12 +363,24 @@
HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_LEVEL_FIXED, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
+ HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh)
+
+/* Mask/shift struct generation macro for ASICs with VM */
+#define HUBP_MASK_SH_LIST_DCN_VM(mask_sh)\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_1, REFCYC_PER_PTE_GROUP_NOM_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_2, DST_Y_PER_PTE_ROW_NOM_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_3, REFCYC_PER_PTE_GROUP_NOM_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, mask_sh),\
HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh),\
- HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh)
+ HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL0, REFCYC_PER_REQ_DELIVERY, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL0, QoS_LEVEL_FIXED, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh)
#define HUBP_MASK_SH_LIST_DCN10(mask_sh)\
HUBP_MASK_SH_LIST_DCN(mask_sh),\
+ HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\
HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, DST_Y_PREFETCH, mask_sh),\
HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, VRATIO_PREFETCH, mask_sh),\
HUBP_SF(HUBPREQ0_PREFETCH_SETTINS_C, VRATIO_PREFETCH_C, mask_sh),\
@@ -601,8 +619,29 @@ struct dcn_mi_mask {
DCN_HUBP_REG_FIELD_LIST(uint32_t);
};
+struct dcn_hubp_state {
+ struct _vcs_dpi_display_dlg_regs_st dlg_attr;
+ struct _vcs_dpi_display_ttu_regs_st ttu_attr;
+ struct _vcs_dpi_display_rq_regs_st rq_regs;
+ uint32_t pixel_format;
+ uint32_t inuse_addr_hi;
+ uint32_t viewport_width;
+ uint32_t viewport_height;
+ uint32_t rotation_angle;
+ uint32_t h_mirror_en;
+ uint32_t sw_mode;
+ uint32_t dcc_en;
+ uint32_t blank_en;
+ uint32_t underflow_status;
+ uint32_t ttu_disable;
+ uint32_t min_ttu_vblank;
+ uint32_t qos_level_low_wm;
+ uint32_t qos_level_high_wm;
+};
+
struct dcn10_hubp {
struct hubp base;
+ struct dcn_hubp_state state;
const struct dcn_mi_registers *hubp_regs;
const struct dcn_mi_shift *hubp_shift;
const struct dcn_mi_mask *hubp_mask;
@@ -680,25 +719,7 @@ void dcn10_hubp_construct(
const struct dcn_mi_shift *hubp_shift,
const struct dcn_mi_mask *hubp_mask);
-
-struct dcn_hubp_state {
- uint32_t pixel_format;
- uint32_t inuse_addr_hi;
- uint32_t viewport_width;
- uint32_t viewport_height;
- uint32_t rotation_angle;
- uint32_t h_mirror_en;
- uint32_t sw_mode;
- uint32_t dcc_en;
- uint32_t blank_en;
- uint32_t underflow_status;
- uint32_t ttu_disable;
- uint32_t min_ttu_vblank;
- uint32_t qos_level_low_wm;
- uint32_t qos_level_high_wm;
-};
-void hubp1_read_state(struct dcn10_hubp *hubp1,
- struct dcn_hubp_state *s);
+void hubp1_read_state(struct hubp *hubp);
enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 8b0f6b8..572fa60 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -45,8 +45,8 @@
#include "dcn10_hubbub.h"
#include "dcn10_cm_common.h"
-#define DC_LOGGER \
- ctx->logger
+#define DC_LOGGER_INIT(logger)
+
#define CTX \
hws->ctx
#define REG(reg)\
@@ -56,16 +56,17 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
+/* each print is 17 characters wide; the first two characters are spaces */
#define DTN_INFO_MICRO_SEC(ref_cycle) \
print_microsec(dc_ctx, ref_cycle)
void print_microsec(struct dc_context *dc_ctx, uint32_t ref_cycle)
{
- static const uint32_t ref_clk_mhz = 48;
- static const unsigned int frac = 10;
+ const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
+ static const unsigned int frac = 1000;
uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
- DTN_INFO("%d.%d \t ",
+ DTN_INFO(" %11d.%03d",
us_x10 / frac,
us_x10 % frac);
}
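
With the reference clock now taken from res_pool and frac raised to 1000, the helper prints three fractional digits of microseconds. A worked example under an assumed 48 MHz reference clock (values illustrative only):

/* ref_clock_inKhz == 48000  ->  ref_clk_mhz == 48 */
uint32_t ref_cycle = 12345;
uint32_t us_x10 = (ref_cycle * 1000) / 48;	/* 257187 */
/* " %11d.%03d" prints 257187 / 1000 and 257187 % 1000: "257.187" us */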
@@ -92,14 +93,14 @@ void dcn10_log_hubbub_state(struct dc *dc)
hubbub1_wm_read_state(dc->res_pool->hubbub, &wm);
- DTN_INFO("HUBBUB WM: \t data_urgent \t pte_meta_urgent \t "
- "sr_enter \t sr_exit \t dram_clk_change \n");
+ DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
+ " sr_enter sr_exit dram_clk_change\n");
for (i = 0; i < 4; i++) {
struct dcn_hubbub_wm_set *s;
s = &wm.sets[i];
- DTN_INFO("WM_Set[%d]:\t ", s->wm_set);
+ DTN_INFO("WM_Set[%d]:", s->wm_set);
DTN_INFO_MICRO_SEC(s->data_urgent);
DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
DTN_INFO_MICRO_SEC(s->sr_enter);
@@ -111,6 +112,116 @@ void dcn10_log_hubbub_state(struct dc *dc)
DTN_INFO("\n");
}
+static void dcn10_log_hubp_states(struct dc *dc)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ DTN_INFO("HUBP: format addr_hi width height"
+ " rot mir sw_mode dcc_en blank_en ttu_dis underflow"
+ " min_ttu_vblank qos_low_wm qos_high_wm\n");
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct hubp *hubp = pool->hubps[i];
+ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
+
+ hubp->funcs->hubp_read_state(hubp);
+
+ DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh"
+ " %6d %8d %7d %8xh",
+ hubp->inst,
+ s->pixel_format,
+ s->inuse_addr_hi,
+ s->viewport_width,
+ s->viewport_height,
+ s->rotation_angle,
+ s->h_mirror_en,
+ s->sw_mode,
+ s->dcc_en,
+ s->blank_en,
+ s->ttu_disable,
+ s->underflow_status);
+ DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
+ DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
+ DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
+ DTN_INFO("\n");
+ }
+
+ DTN_INFO("\n=========RQ========\n");
+ DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
+ " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
+ " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+
+ DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
+ i, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
+ rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
+ rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
+ rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
+ rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
+ rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
+ rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
+ rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
+ rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
+ }
+
+ DTN_INFO("========DLG========\n");
+ DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
+ " dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
+ " vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
+ " rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
+ " mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
+ " rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
+ " x_rp_dlay x_rr_sfl\n");
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
+
+ DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
+ "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
+ " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
+ i, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
+ dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
+ dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
+ dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
+ dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
+ dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
+ dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
+ dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
+ dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
+ dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
+ dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
+ dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
+ dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
+ dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
+ dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
+ dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
+ dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
+ dlg_regs->xfc_reg_remote_surface_flip_latency);
+ }
+
+ DTN_INFO("========TTU========\n");
+ DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
+ " rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
+ " qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
+
+ DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
+ i, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
+ ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
+ ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
+ ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
+ ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
+ ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
+ ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
+ }
+ DTN_INFO("\n");
+}
+
void dcn10_log_hw_state(struct dc *dc)
{
struct dc_context *dc_ctx = dc->ctx;
@@ -121,41 +232,64 @@ void dcn10_log_hw_state(struct dc *dc)
dcn10_log_hubbub_state(dc);
- DTN_INFO("HUBP:\t format \t addr_hi \t width \t height \t "
- "rotation \t mirror \t sw_mode \t "
- "dcc_en \t blank_en \t ttu_dis \t underflow \t "
- "min_ttu_vblank \t qos_low_wm \t qos_high_wm \n");
+ dcn10_log_hubp_states(dc);
+ DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
+ " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
+ "C31 C32 C33 C34\n");
for (i = 0; i < pool->pipe_count; i++) {
- struct hubp *hubp = pool->hubps[i];
- struct dcn_hubp_state s;
+ struct dpp *dpp = pool->dpps[i];
+ struct dcn_dpp_state s;
+
+ dpp->funcs->dpp_read_state(dpp, &s);
+
+ DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
+ "%8x %08xh %08xh %08xh %08xh %08xh %08xh",
+ dpp->inst,
+ s.igam_input_format,
+ (s.igam_lut_mode == 0) ? "BypassFixed" :
+ ((s.igam_lut_mode == 1) ? "BypassFloat" :
+ ((s.igam_lut_mode == 2) ? "RAM" :
+ ((s.igam_lut_mode == 3) ? "RAM" :
+ "Unknown"))),
+ (s.dgam_lut_mode == 0) ? "Bypass" :
+ ((s.dgam_lut_mode == 1) ? "sRGB" :
+ ((s.dgam_lut_mode == 2) ? "Ycc" :
+ ((s.dgam_lut_mode == 3) ? "RAM" :
+ ((s.dgam_lut_mode == 4) ? "RAM" :
+ "Unknown")))),
+ (s.rgam_lut_mode == 0) ? "Bypass" :
+ ((s.rgam_lut_mode == 1) ? "sRGB" :
+ ((s.rgam_lut_mode == 2) ? "Ycc" :
+ ((s.rgam_lut_mode == 3) ? "RAM" :
+ ((s.rgam_lut_mode == 4) ? "RAM" :
+ "Unknown")))),
+ s.gamut_remap_mode,
+ s.gamut_remap_c11_c12,
+ s.gamut_remap_c13_c14,
+ s.gamut_remap_c21_c22,
+ s.gamut_remap_c23_c24,
+ s.gamut_remap_c31_c32,
+ s.gamut_remap_c33_c34);
+ DTN_INFO("\n");
+ }
+ DTN_INFO("\n");
- hubp1_read_state(TO_DCN10_HUBP(hubp), &s);
+ DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct mpcc_state s = {0};
- DTN_INFO("[%d]:\t %xh \t %xh \t %d \t %d \t "
- "%xh \t %xh \t %xh \t "
- "%d \t %d \t %d \t %xh \t",
- hubp->inst,
- s.pixel_format,
- s.inuse_addr_hi,
- s.viewport_width,
- s.viewport_height,
- s.rotation_angle,
- s.h_mirror_en,
- s.sw_mode,
- s.dcc_en,
- s.blank_en,
- s.ttu_disable,
- s.underflow_status);
- DTN_INFO_MICRO_SEC(s.min_ttu_vblank);
- DTN_INFO_MICRO_SEC(s.qos_level_low_wm);
- DTN_INFO_MICRO_SEC(s.qos_level_high_wm);
- DTN_INFO("\n");
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+ if (s.opp_id != 0xf)
+ DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
+ i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
+ s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
+ s.idle);
}
DTN_INFO("\n");
- DTN_INFO("OTG:\t v_bs \t v_be \t v_ss \t v_se \t vpol \t vmax \t vmin \t "
- "h_bs \t h_be \t h_ss \t h_se \t hpol \t htot \t vtot \t underflow\n");
+ DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel"
+ " h_bs h_be h_ss h_se hpol htot vtot underflow\n");
for (i = 0; i < pool->timing_generator_count; i++) {
struct timing_generator *tg = pool->timing_generators[i];
@@ -167,9 +301,8 @@ void dcn10_log_hw_state(struct dc *dc)
if ((s.otg_enabled & 1) == 0)
continue;
- DTN_INFO("[%d]:\t %d \t %d \t %d \t %d \t "
- "%d \t %d \t %d \t %d \t %d \t %d \t "
- "%d \t %d \t %d \t %d \t %d \t ",
+ DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d"
+ " %5d %5d %5d %5d %9d\n",
tg->inst,
s.v_blank_start,
s.v_blank_end,
@@ -178,6 +311,8 @@ void dcn10_log_hw_state(struct dc *dc)
s.v_sync_a_pol,
s.v_total_max,
s.v_total_min,
+ s.v_total_max_sel,
+ s.v_total_min_sel,
s.h_blank_start,
s.h_blank_end,
s.h_sync_a_start,
@@ -186,10 +321,19 @@ void dcn10_log_hw_state(struct dc *dc)
s.h_total,
s.v_total,
s.underflow_occurred_status);
- DTN_INFO("\n");
}
DTN_INFO("\n");
+ DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
+ "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
+ dc->current_state->bw.dcn.calc_clk.dcfclk_khz,
+ dc->current_state->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
+ dc->current_state->bw.dcn.calc_clk.dispclk_khz,
+ dc->current_state->bw.dcn.calc_clk.dppclk_khz,
+ dc->current_state->bw.dcn.calc_clk.max_supported_dppclk_khz,
+ dc->current_state->bw.dcn.calc_clk.fclk_khz,
+ dc->current_state->bw.dcn.calc_clk.socclk_khz);
+
log_mpc_crc(dc);
DTN_INFO_END();
@@ -354,7 +498,7 @@ static void power_on_plane(
struct dce_hwseq *hws,
int plane_id)
{
- struct dc_context *ctx = hws->ctx;
+ DC_LOGGER_INIT(hws->ctx->logger);
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
@@ -461,7 +605,7 @@ static void false_optc_underflow_wa(
tg->funcs->clear_optc_underflow(tg);
}
-static enum dc_status dcn10_prog_pixclk_crtc_otg(
+static enum dc_status dcn10_enable_stream_timing(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
struct dc *dc)
@@ -553,7 +697,7 @@ static void reset_back_end_for_pipe(
struct dc_state *context)
{
int i;
- struct dc_context *ctx = dc->ctx;
+ DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
return;
@@ -649,7 +793,7 @@ static void plane_atomic_power_down(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
- struct dc_context *ctx = dc->ctx;
+ DC_LOGGER_INIT(dc->ctx->logger);
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
@@ -699,7 +843,7 @@ static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
- struct dc_context *ctx = dc->ctx;
+ DC_LOGGER_INIT(dc->ctx->logger);
if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
return;
@@ -945,9 +1089,8 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
tf = plane_state->in_transfer_func;
if (plane_state->gamma_correction &&
- plane_state->gamma_correction->is_identity)
- dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
- else if (plane_state->gamma_correction && dce_use_lut(plane_state->format))
+ !plane_state->gamma_correction->is_identity
+ && dce_use_lut(plane_state->format))
dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
if (tf == NULL)
@@ -1433,7 +1576,7 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
}
}
-static void program_output_csc(struct dc *dc,
+static void dcn10_program_output_csc(struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
uint16_t *matrix,
@@ -1623,6 +1766,8 @@ static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
+
/* TODO: proper fix once fpga works */
if (dc->debug.surface_visual_confirm)
@@ -1649,6 +1794,7 @@ static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->stream->output_color_space)
&& per_pixel_alpha;
+
/*
* TODO: remove hack
* Note: currently there is a bug in init_hw such that
@@ -1659,6 +1805,12 @@ static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
*/
mpcc_id = hubp->inst;
+ /* If there is no full update, we don't need to touch the MPC tree */
+ if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
+ mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
+ return;
+ }
+
/* check if this MPCC is already being used */
new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
/* remove MPCC if being used */
@@ -1777,7 +1929,7 @@ static void update_dchubp_dpp(
/*gamut remap*/
program_gamut_remap(pipe_ctx);
- program_output_csc(dc,
+ dc->hwss.program_output_csc(dc,
pipe_ctx,
pipe_ctx->stream->output_color_space,
pipe_ctx->stream->csc_color_matrix.matrix,
@@ -1810,9 +1962,9 @@ static void update_dchubp_dpp(
hubp->funcs->set_blank(hubp, false);
}
-static void dcn10_otg_blank(
+static void dcn10_blank_pixel_data(
struct dc *dc,
- struct stream_resource stream_res,
+ struct stream_resource *stream_res,
struct dc_stream_state *stream,
bool blank)
{
@@ -1823,21 +1975,21 @@ static void dcn10_otg_blank(
color_space = stream->output_color_space;
color_space_to_black_color(dc, color_space, &black_color);
- if (stream_res.tg->funcs->set_blank_color)
- stream_res.tg->funcs->set_blank_color(
- stream_res.tg,
+ if (stream_res->tg->funcs->set_blank_color)
+ stream_res->tg->funcs->set_blank_color(
+ stream_res->tg,
&black_color);
if (!blank) {
- if (stream_res.tg->funcs->set_blank)
- stream_res.tg->funcs->set_blank(stream_res.tg, blank);
- if (stream_res.abm)
- stream_res.abm->funcs->set_abm_level(stream_res.abm, stream->abm_level);
+ if (stream_res->tg->funcs->set_blank)
+ stream_res->tg->funcs->set_blank(stream_res->tg, blank);
+ if (stream_res->abm)
+ stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
} else if (blank) {
- if (stream_res.abm)
- stream_res.abm->funcs->set_abm_immediate_disable(stream_res.abm);
- if (stream_res.tg->funcs->set_blank)
- stream_res.tg->funcs->set_blank(stream_res.tg, blank);
+ if (stream_res->abm)
+ stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
+ if (stream_res->tg->funcs->set_blank)
+ stream_res->tg->funcs->set_blank(stream_res->tg, blank);
}
}
@@ -1876,7 +2028,7 @@ static void program_all_pipe_in_tree(
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg);
- dcn10_otg_blank(dc, pipe_ctx->stream_res,
+ dc->hwss.blank_pixel_data(dc, &pipe_ctx->stream_res,
pipe_ctx->stream, blank);
}
@@ -1983,9 +2135,9 @@ static void dcn10_apply_ctx_for_surface(
bool removed_pipe[4] = { false };
unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
bool program_water_mark = false;
- struct dc_context *ctx = dc->ctx;
struct pipe_ctx *top_pipe_to_program =
find_top_pipe_for_stream(dc, context, stream);
+ DC_LOGGER_INIT(dc->ctx->logger);
if (!top_pipe_to_program)
return;
@@ -1996,7 +2148,7 @@ static void dcn10_apply_ctx_for_surface(
if (num_planes == 0) {
/* OTG blank before remove all front end */
- dcn10_otg_blank(dc, top_pipe_to_program->stream_res, top_pipe_to_program->stream, true);
+ dc->hwss.blank_pixel_data(dc, &top_pipe_to_program->stream_res, top_pipe_to_program->stream, true);
}
/* Disconnect unused mpcc */
@@ -2527,6 +2679,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.update_pending_status = dcn10_update_pending_status,
.set_input_transfer_func = dcn10_set_input_transfer_func,
.set_output_transfer_func = dcn10_set_output_transfer_func,
+ .program_output_csc = dcn10_program_output_csc,
.power_down = dce110_power_down,
.enable_accelerated_mode = dce110_enable_accelerated_mode,
.enable_timing_synchronization = dcn10_enable_timing_synchronization,
@@ -2538,10 +2691,11 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.blank_stream = dce110_blank_stream,
.enable_display_power_gating = dcn10_dummy_display_power_gating,
.disable_plane = dcn10_disable_plane,
+ .blank_pixel_data = dcn10_blank_pixel_data,
.pipe_control_lock = dcn10_pipe_control_lock,
.set_bandwidth = dcn10_set_bandwidth,
.reset_hw_ctx_wrap = reset_hw_ctx_wrap,
- .prog_pixclk_crtc_otg = dcn10_prog_pixclk_crtc_otg,
+ .enable_stream_timing = dcn10_enable_stream_timing,
.set_drr = set_drr,
.get_position = get_position,
.set_static_screen_control = set_static_screen_control,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 179890b..9ca51ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -65,6 +65,7 @@ static void mpc1_update_blending(
int mpcc_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ struct mpcc *mpcc = mpc1_get_mpcc(mpc, mpcc_id);
REG_UPDATE_5(MPCC_CONTROL[mpcc_id],
MPCC_ALPHA_BLND_MODE, blnd_cfg->alpha_mode,
@@ -74,6 +75,7 @@ static void mpc1_update_blending(
MPCC_GLOBAL_GAIN, blnd_cfg->global_gain);
mpc1_set_bg_color(mpc, &blnd_cfg->black_color, mpcc_id);
+ mpcc->blnd_cfg = *blnd_cfg;
}
void mpc1_update_stereo_mix(
@@ -235,8 +237,7 @@ struct mpcc *mpc1_insert_plane(
}
/* update the blending configuration */
- new_mpcc->blnd_cfg = *blnd_cfg;
- mpc->funcs->update_blending(mpc, &new_mpcc->blnd_cfg, mpcc_id);
+ mpc->funcs->update_blending(mpc, blnd_cfg, mpcc_id);
/* update the stereo mix settings, if provided */
if (sm_cfg != NULL) {
@@ -409,7 +410,26 @@ void mpc1_init_mpcc_list_from_hw(
}
}
+void mpc1_read_mpcc_state(
+ struct mpc *mpc,
+ int mpcc_inst,
+ struct mpcc_state *s)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
+ REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id);
+ REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id);
+ REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id);
+ REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode,
+ MPCC_ALPHA_BLND_MODE, &s->alpha_mode,
+ MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha,
+ MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only);
+ REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle,
+ MPCC_BUSY, &s->busy);
+}
+
const struct mpc_funcs dcn10_mpc_funcs = {
+ .read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
.mpc_init = mpc1_mpc_init,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
index 267a299..d3d16c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
@@ -183,4 +183,9 @@ struct mpcc *mpc1_get_mpcc_for_dpp(
struct mpc_tree *tree,
int dpp_id);
+void mpc1_read_mpcc_state(
+ struct mpc *mpc,
+ int mpcc_inst,
+ struct mpcc_state *s);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 4bf64d1..c734b7f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -93,6 +93,81 @@ static void optc1_disable_stereo(struct timing_generator *optc)
OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
}
+static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing)
+{
+ struct dc_crtc_timing patched_crtc_timing;
+ int vesa_sync_start;
+ int asic_blank_end;
+ int interlace_factor;
+ int vertical_line_start;
+
+ patched_crtc_timing = *dc_crtc_timing;
+ optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);
+
+ vesa_sync_start = patched_crtc_timing.h_addressable +
+ patched_crtc_timing.h_border_right +
+ patched_crtc_timing.h_front_porch;
+
+ asic_blank_end = patched_crtc_timing.h_total -
+ vesa_sync_start -
+ patched_crtc_timing.h_border_left;
+
+ interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
+
+ vesa_sync_start = patched_crtc_timing.v_addressable +
+ patched_crtc_timing.v_border_bottom +
+ patched_crtc_timing.v_front_porch;
+
+ asic_blank_end = (patched_crtc_timing.v_total -
+ vesa_sync_start -
+ patched_crtc_timing.v_border_top)
+ * interlace_factor;
+
+ vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
+ if (vertical_line_start < 0) {
+ ASSERT(0);
+ vertical_line_start = 0;
+ }
+
+ return vertical_line_start;
+}
+
+void optc1_program_vline_interrupt(
+ struct timing_generator *optc,
+ const struct dc_crtc_timing *dc_crtc_timing,
+ unsigned long long vsync_delta)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ unsigned long long req_delta_tens_of_usec = div64_u64((vsync_delta + 9999), 10000);
+ unsigned long long pix_clk_hundreds_khz = div64_u64((dc_crtc_timing->pix_clk_khz + 99), 100);
+ uint32_t req_delta_lines = (uint32_t) div64_u64(
+ (req_delta_tens_of_usec * pix_clk_hundreds_khz + dc_crtc_timing->h_total - 1),
+ dc_crtc_timing->h_total);
+
+ uint32_t vsync_line = get_start_vline(optc, dc_crtc_timing);
+ uint32_t start_line = 0;
+ uint32_t end_line = 0;
+
+ if (req_delta_lines != 0)
+ req_delta_lines--;
+
+ if (req_delta_lines > vsync_line)
+ start_line = dc_crtc_timing->v_total - (req_delta_lines - vsync_line) - 1;
+ else
+ start_line = vsync_line - req_delta_lines;
+
+ end_line = start_line + 2;
+
+ if (end_line >= dc_crtc_timing->v_total)
+ end_line = 2;
+
+ REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0,
+ OTG_VERTICAL_INTERRUPT0_LINE_START, start_line,
+ OTG_VERTICAL_INTERRUPT0_LINE_END, end_line);
+}
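
The arithmetic above converts a time delta into a line count: vsync_delta appears to be in nanoseconds, so it is rounded up to tens of microseconds, multiplied by the pixel clock in hundreds of kHz (the two scale factors cancel out to whole pixels), and divided by h_total with round-up. A worked example with assumed 1080p CEA numbers:

/* Illustrative only: pix_clk_khz = 148500, h_total = 2200,
 * vsync_delta = 500000 ns (0.5 ms):
 *   req_delta_tens_of_usec = (500000 + 9999) / 10000 = 50
 *   pix_clk_hundreds_khz   = (148500 + 99) / 100     = 1485
 *   req_delta_lines        = (50 * 1485 + 2199) / 2200 = 34
 * i.e. the interrupt window opens ~34 lines (~0.5 ms) before the
 * start vline computed by get_start_vline().
 */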
+
/**
* program_timing_generator used by mode timing set
* Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition.
@@ -780,17 +855,17 @@ void optc1_set_drr(
OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
OTG_SET_V_TOTAL_MIN_MASK, 0);
} else {
- REG_SET(OTG_V_TOTAL_MIN, 0,
- OTG_V_TOTAL_MIN, 0);
-
- REG_SET(OTG_V_TOTAL_MAX, 0,
- OTG_V_TOTAL_MAX, 0);
-
REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
OTG_SET_V_TOTAL_MIN_MASK, 0,
OTG_V_TOTAL_MIN_SEL, 0,
OTG_V_TOTAL_MAX_SEL, 0,
OTG_FORCE_LOCK_ON_EVENT, 0);
+
+ REG_SET(OTG_V_TOTAL_MIN, 0,
+ OTG_V_TOTAL_MIN, 0);
+
+ REG_SET(OTG_V_TOTAL_MAX, 0,
+ OTG_V_TOTAL_MAX, 0);
}
}
@@ -1154,6 +1229,12 @@ void optc1_read_otg_state(struct optc *optc1,
REG_GET(OTG_V_TOTAL_MIN,
OTG_V_TOTAL_MIN, &s->v_total_min);
+ REG_GET(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MAX_SEL, &s->v_total_max_sel);
+
+ REG_GET(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, &s->v_total_min_sel);
+
REG_GET_2(OTG_V_SYNC_A,
OTG_V_SYNC_A_START, &s->v_sync_a_start,
OTG_V_SYNC_A_END, &s->v_sync_a_end);
@@ -1215,6 +1296,7 @@ static bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
static const struct timing_generator_funcs dcn10_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
+ .program_vline_interrupt = optc1_program_vline_interrupt,
.program_global_sync = optc1_program_global_sync,
.enable_crtc = optc1_enable_crtc,
.disable_crtc = optc1_disable_crtc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index d25e7bf..89e09e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -65,6 +65,8 @@
SRI(OTG_NOM_VERT_POSITION, OTG, inst),\
SRI(OTG_BLACK_COLOR, OTG, inst),\
SRI(OTG_CLOCK_CONTROL, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\
SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\
SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\
SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\
@@ -124,6 +126,8 @@ struct dcn_optc_registers {
uint32_t OTG_TEST_PATTERN_CONTROL;
uint32_t OTG_TEST_PATTERN_COLOR;
uint32_t OTG_CLOCK_CONTROL;
+ uint32_t OTG_VERTICAL_INTERRUPT0_CONTROL;
+ uint32_t OTG_VERTICAL_INTERRUPT0_POSITION;
uint32_t OTG_VERTICAL_INTERRUPT2_CONTROL;
uint32_t OTG_VERTICAL_INTERRUPT2_POSITION;
uint32_t OPTC_INPUT_CLOCK_CONTROL;
@@ -206,6 +210,9 @@ struct dcn_optc_registers {
SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\
SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\
SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\
SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\
SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\
SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\
@@ -323,6 +330,9 @@ struct dcn_optc_registers {
type OTG_CLOCK_EN;\
type OTG_CLOCK_ON;\
type OTG_CLOCK_GATE_DIS;\
+ type OTG_VERTICAL_INTERRUPT0_INT_ENABLE;\
+ type OTG_VERTICAL_INTERRUPT0_LINE_START;\
+ type OTG_VERTICAL_INTERRUPT0_LINE_END;\
type OTG_VERTICAL_INTERRUPT2_INT_ENABLE;\
type OTG_VERTICAL_INTERRUPT2_LINE_START;\
type OPTC_INPUT_CLK_EN;\
@@ -396,6 +406,8 @@ struct dcn_otg_state {
uint32_t v_total;
uint32_t v_total_max;
uint32_t v_total_min;
+ uint32_t v_total_min_sel;
+ uint32_t v_total_max_sel;
uint32_t v_sync_a_start;
uint32_t v_sync_a_end;
uint32_t h_blank_start;
@@ -420,6 +432,10 @@ void optc1_program_timing(
const struct dc_crtc_timing *dc_crtc_timing,
bool use_vbios);
+void optc1_program_vline_interrupt(struct timing_generator *optc,
+ const struct dc_crtc_timing *dc_crtc_timing,
+ unsigned long long vsync_delta);
+
void optc1_program_global_sync(
struct timing_generator *optc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 02bd664..2c0a315 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -39,7 +39,7 @@
#include "dce110/dce110_hw_sequencer.h"
#include "dcn10/dcn10_opp.h"
#include "dce/dce_link_encoder.h"
-#include "dce/dce_stream_encoder.h"
+#include "dcn10/dcn10_stream_encoder.h"
#include "dce/dce_clocks.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
@@ -166,36 +166,22 @@ static const struct dce_abm_mask abm_mask = {
#define stream_enc_regs(id)\
[id] = {\
- SE_DCN_REG_LIST(id),\
- .TMDS_CNTL = 0,\
- .AFMT_AVI_INFO0 = 0,\
- .AFMT_AVI_INFO1 = 0,\
- .AFMT_AVI_INFO2 = 0,\
- .AFMT_AVI_INFO3 = 0,\
+ SE_DCN_REG_LIST(id)\
}
-static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
stream_enc_regs(0),
stream_enc_regs(1),
stream_enc_regs(2),
stream_enc_regs(3),
};
-static const struct dce_stream_encoder_shift se_shift = {
+static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT)
};
-static const struct dce_stream_encoder_mask se_mask = {
- SE_COMMON_MASK_SH_LIST_DCN10(_MASK),
- .AFMT_GENERIC0_UPDATE = 0,
- .AFMT_GENERIC2_UPDATE = 0,
- .DP_DYN_RANGE = 0,
- .DP_YCBCR_RANGE = 0,
- .HDMI_AVI_INFO_SEND = 0,
- .HDMI_AVI_INFO_CONT = 0,
- .HDMI_AVI_INFO_LINE = 0,
- .DP_SEC_AVI_ENABLE = 0,
- .AFMT_AVI_INFO_VERSION = 0
+static const struct dcn10_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCN10(_MASK)
};
#define audio_regs(id)\
@@ -320,11 +306,14 @@ static const struct dcn_dpp_registers tf_regs[] = {
};
static const struct dcn_dpp_shift tf_shift = {
- TF_REG_LIST_SH_MASK_DCN10(__SHIFT)
+ TF_REG_LIST_SH_MASK_DCN10(__SHIFT),
+ TF_DEBUG_REG_LIST_SH_DCN10
};
static const struct dcn_dpp_mask tf_mask = {
TF_REG_LIST_SH_MASK_DCN10(_MASK),
+ TF_DEBUG_REG_LIST_MASK_DCN10
};
static const struct dcn_mpc_registers mpc_regs = {
@@ -650,16 +639,16 @@ static struct stream_encoder *dcn10_stream_encoder_create(
enum engine_id eng_id,
struct dc_context *ctx)
{
- struct dce110_stream_encoder *enc110 =
- kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+ struct dcn10_stream_encoder *enc1 =
+ kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
- if (!enc110)
+ if (!enc1)
return NULL;
- dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ dcn10_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
- return &enc110->base;
+ return &enc1->base;
}
static const struct dce_hwseq_registers hwseq_reg = {
@@ -918,36 +907,6 @@ enum dc_status dcn10_add_stream_to_ctx(
return result;
}
-enum dc_status dcn10_validate_guaranteed(
- struct dc *dc,
- struct dc_stream_state *dc_stream,
- struct dc_state *context)
-{
- enum dc_status result = DC_ERROR_UNEXPECTED;
-
- context->streams[0] = dc_stream;
- dc_stream_retain(context->streams[0]);
- context->stream_count++;
-
- result = resource_map_pool_resources(dc, context, dc_stream);
-
- if (result == DC_OK)
- result = resource_map_phy_clock_resources(dc, context, dc_stream);
-
- if (result == DC_OK)
- result = build_mapped_resource(dc, context, dc_stream);
-
- if (result == DC_OK) {
- validate_guaranteed_copy_streams(
- context, dc->caps.max_streams);
- result = resource_build_scaling_params_for_context(dc, context);
- }
- if (result == DC_OK && !dcn_validate_bandwidth(dc, context))
- return DC_FAIL_BANDWIDTH_VALIDATE;
-
- return result;
-}
-
static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
struct dc_state *context,
const struct resource_pool *pool,
@@ -978,235 +937,16 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
return idle_pipe;
}
-enum dcc_control {
- dcc_control__256_256_xxx,
- dcc_control__128_128_xxx,
- dcc_control__256_64_64,
-};
-
-enum segment_order {
- segment_order__na,
- segment_order__contiguous,
- segment_order__non_contiguous,
-};
-
-static bool dcc_support_pixel_format(
- enum surface_pixel_format format,
- unsigned int *bytes_per_element)
-{
- /* DML: get_bytes_per_element */
- switch (format) {
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
- case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
- *bytes_per_element = 2;
- return true;
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
- case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
- case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
- *bytes_per_element = 4;
- return true;
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
- case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
- *bytes_per_element = 8;
- return true;
- default:
- return false;
- }
-}
-
-static bool dcc_support_swizzle(
- enum swizzle_mode_values swizzle,
- unsigned int bytes_per_element,
- enum segment_order *segment_order_horz,
- enum segment_order *segment_order_vert)
-{
- bool standard_swizzle = false;
- bool display_swizzle = false;
-
- switch (swizzle) {
- case DC_SW_4KB_S:
- case DC_SW_64KB_S:
- case DC_SW_VAR_S:
- case DC_SW_4KB_S_X:
- case DC_SW_64KB_S_X:
- case DC_SW_VAR_S_X:
- standard_swizzle = true;
- break;
- case DC_SW_4KB_D:
- case DC_SW_64KB_D:
- case DC_SW_VAR_D:
- case DC_SW_4KB_D_X:
- case DC_SW_64KB_D_X:
- case DC_SW_VAR_D_X:
- display_swizzle = true;
- break;
- default:
- break;
- }
-
- if (bytes_per_element == 1 && standard_swizzle) {
- *segment_order_horz = segment_order__contiguous;
- *segment_order_vert = segment_order__na;
- return true;
- }
- if (bytes_per_element == 2 && standard_swizzle) {
- *segment_order_horz = segment_order__non_contiguous;
- *segment_order_vert = segment_order__contiguous;
- return true;
- }
- if (bytes_per_element == 4 && standard_swizzle) {
- *segment_order_horz = segment_order__non_contiguous;
- *segment_order_vert = segment_order__contiguous;
- return true;
- }
- if (bytes_per_element == 8 && standard_swizzle) {
- *segment_order_horz = segment_order__na;
- *segment_order_vert = segment_order__contiguous;
- return true;
- }
- if (bytes_per_element == 8 && display_swizzle) {
- *segment_order_horz = segment_order__contiguous;
- *segment_order_vert = segment_order__non_contiguous;
- return true;
- }
-
- return false;
-}
-
-static void get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
- unsigned int bytes_per_element)
-{
- /* copied from DML. might want to refactor DML to leverage from DML */
- /* DML : get_blk256_size */
- if (bytes_per_element == 1) {
- *blk256_width = 16;
- *blk256_height = 16;
- } else if (bytes_per_element == 2) {
- *blk256_width = 16;
- *blk256_height = 8;
- } else if (bytes_per_element == 4) {
- *blk256_width = 8;
- *blk256_height = 8;
- } else if (bytes_per_element == 8) {
- *blk256_width = 8;
- *blk256_height = 4;
- }
-}
-
-static void det_request_size(
- unsigned int height,
- unsigned int width,
- unsigned int bpe,
- bool *req128_horz_wc,
- bool *req128_vert_wc)
-{
- unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
-
- unsigned int blk256_height = 0;
- unsigned int blk256_width = 0;
- unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
-
- get_blk256_size(&blk256_width, &blk256_height, bpe);
-
- swath_bytes_horz_wc = height * blk256_height * bpe;
- swath_bytes_vert_wc = width * blk256_width * bpe;
-
- *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
- false : /* full 256B request */
- true; /* half 128b request */
-
- *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
- false : /* full 256B request */
- true; /* half 128b request */
-}
-
-static bool get_dcc_compression_cap(const struct dc *dc,
+static bool dcn10_get_dcc_compression_cap(const struct dc *dc,
const struct dc_dcc_surface_param *input,
struct dc_surface_dcc_cap *output)
{
- /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
- enum dcc_control dcc_control;
- unsigned int bpe;
- enum segment_order segment_order_horz, segment_order_vert;
- bool req128_horz_wc, req128_vert_wc;
-
- memset(output, 0, sizeof(*output));
-
- if (dc->debug.disable_dcc == DCC_DISABLE)
- return false;
-
- if (!dcc_support_pixel_format(input->format,
- &bpe))
- return false;
-
- if (!dcc_support_swizzle(input->swizzle_mode, bpe,
- &segment_order_horz, &segment_order_vert))
- return false;
-
- det_request_size(input->surface_size.height, input->surface_size.width,
- bpe, &req128_horz_wc, &req128_vert_wc);
-
- if (!req128_horz_wc && !req128_vert_wc) {
- dcc_control = dcc_control__256_256_xxx;
- } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
- if (!req128_horz_wc)
- dcc_control = dcc_control__256_256_xxx;
- else if (segment_order_horz == segment_order__contiguous)
- dcc_control = dcc_control__128_128_xxx;
- else
- dcc_control = dcc_control__256_64_64;
- } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
- if (!req128_vert_wc)
- dcc_control = dcc_control__256_256_xxx;
- else if (segment_order_vert == segment_order__contiguous)
- dcc_control = dcc_control__128_128_xxx;
- else
- dcc_control = dcc_control__256_64_64;
- } else {
- if ((req128_horz_wc &&
- segment_order_horz == segment_order__non_contiguous) ||
- (req128_vert_wc &&
- segment_order_vert == segment_order__non_contiguous))
- /* access_dir not known, must use most constraining */
- dcc_control = dcc_control__256_64_64;
- else
- /* reg128 is true for either horz and vert
- * but segment_order is contiguous
- */
- dcc_control = dcc_control__128_128_xxx;
- }
-
- if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
- dcc_control != dcc_control__256_256_xxx)
- return false;
-
- switch (dcc_control) {
- case dcc_control__256_256_xxx:
- output->grph.rgb.max_uncompressed_blk_size = 256;
- output->grph.rgb.max_compressed_blk_size = 256;
- output->grph.rgb.independent_64b_blks = false;
- break;
- case dcc_control__128_128_xxx:
- output->grph.rgb.max_uncompressed_blk_size = 128;
- output->grph.rgb.max_compressed_blk_size = 128;
- output->grph.rgb.independent_64b_blks = false;
- break;
- case dcc_control__256_64_64:
- output->grph.rgb.max_uncompressed_blk_size = 256;
- output->grph.rgb.max_compressed_blk_size = 64;
- output->grph.rgb.independent_64b_blks = true;
- break;
- }
-
- output->capable = true;
- output->const_color_support = false;
-
- return true;
+ return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
+ dc->res_pool->hubbub,
+ input,
+ output);
}
-
static void dcn10_destroy_resource_pool(struct resource_pool **pool)
{
struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);
@@ -1227,13 +967,12 @@ static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_st
}
static struct dc_cap_funcs cap_funcs = {
- .get_dcc_compression_cap = get_dcc_compression_cap
+ .get_dcc_compression_cap = dcn10_get_dcc_compression_cap
};
static struct resource_funcs dcn10_res_pool_funcs = {
.destroy = dcn10_destroy_resource_pool,
.link_enc_create = dcn10_link_encoder_create,
- .validate_guaranteed = dcn10_validate_guaranteed,
.validate_bandwidth = dcn_validate_bandwidth,
.acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
.validate_plane = dcn10_validate_plane,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
new file mode 100644
index 0000000..befd863
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -0,0 +1,1490 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dc_bios_types.h"
+#include "dcn10_stream_encoder.h"
+#include "reg_helper.h"
+#include "hw_shared.h"
+
+#define DC_LOGGER \
+ enc1->base.ctx->logger
+
+
+#define REG(reg)\
+ (enc1->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc1->se_shift->field_name, enc1->se_mask->field_name
+
+#define VBI_LINE_0 0
+#define DP_BLANK_MAX_RETRY 20
+#define HDMI_CLOCK_CHANNEL_RATE_MORE_340M 340000
+
+
+enum {
+ DP_MST_UPDATE_MAX_RETRY = 50
+};
+
+#define CTX \
+ enc1->base.ctx
+
+void enc1_update_generic_info_packet(
+ struct dcn10_stream_encoder *enc1,
+ uint32_t packet_index,
+ const struct dc_info_packet *info_packet)
+{
+ uint32_t regval;
+ /* TODOFPGA Figure out a proper max_retries value for the lock poll;
+ * use 50 for now.
+ */
+ uint32_t max_retries = 50;
+
+ /* we need to turn on the clock before programming the AFMT block */
+ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+
+ if (packet_index >= 8)
+ ASSERT(0);
+
+ /* poll until dig_update_lock is not locked -> asic internal signal;
+ * assume the otg master lock will unlock it
+ */
+/* REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS,
+ 0, 10, max_retries);*/
+
+ /* check if HW reading GSP memory */
+ REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT,
+ 0, 10, max_retries);
+
+ /* If HW is still reading GSP memory after the wait above, something
+ * is wrong; clear the GSP memory access conflict, since SW is about
+ * to write to GSP memory.
+ */
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, 1);
+
+ /* choose which generic packet to use */
+ regval = REG_READ(AFMT_VBI_PACKET_CONTROL);
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL,
+ AFMT_GENERIC_INDEX, packet_index);
+
+ /* write generic packet header
+ * (4th byte is for GENERIC0 only)
+ */
+ REG_SET_4(AFMT_GENERIC_HDR, 0,
+ AFMT_GENERIC_HB0, info_packet->hb0,
+ AFMT_GENERIC_HB1, info_packet->hb1,
+ AFMT_GENERIC_HB2, info_packet->hb2,
+ AFMT_GENERIC_HB3, info_packet->hb3);
+
+ /* write generic packet contents
+ * (we never use last 4 bytes)
+ * there are 8 (0-7) mmDIG0_AFMT_GENERIC0_x registers
+ */
+ {
+ const uint32_t *content =
+ (const uint32_t *) &info_packet->sb[0];
+
+ REG_WRITE(AFMT_GENERIC_0, *content++);
+ REG_WRITE(AFMT_GENERIC_1, *content++);
+ REG_WRITE(AFMT_GENERIC_2, *content++);
+ REG_WRITE(AFMT_GENERIC_3, *content++);
+ REG_WRITE(AFMT_GENERIC_4, *content++);
+ REG_WRITE(AFMT_GENERIC_5, *content++);
+ REG_WRITE(AFMT_GENERIC_6, *content++);
+ REG_WRITE(AFMT_GENERIC_7, *content);
+ }
+
+ switch (packet_index) {
+ case 0:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC0_FRAME_UPDATE, 1);
+ break;
+ case 1:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC1_FRAME_UPDATE, 1);
+ break;
+ case 2:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC2_FRAME_UPDATE, 1);
+ break;
+ case 3:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC3_FRAME_UPDATE, 1);
+ break;
+ case 4:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC4_FRAME_UPDATE, 1);
+ break;
+ case 5:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC5_FRAME_UPDATE, 1);
+ break;
+ case 6:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC6_FRAME_UPDATE, 1);
+ break;
+ case 7:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC7_FRAME_UPDATE, 1);
+ break;
+ default:
+ break;
+ }
+}
+
+static void enc1_update_hdmi_info_packet(
+ struct dcn10_stream_encoder *enc1,
+ uint32_t packet_index,
+ const struct dc_info_packet *info_packet)
+{
+ uint32_t cont, send, line;
+
+ if (info_packet->valid) {
+ enc1_update_generic_info_packet(
+ enc1,
+ packet_index,
+ info_packet);
+
+ /* enable transmission of packet(s) -
+ * packet transmission begins on the next frame
+ */
+ cont = 1;
+ /* send packet(s) every frame */
+ send = 1;
+ /* select line number to send packets on */
+ line = 2;
+ } else {
+ cont = 0;
+ send = 0;
+ line = 0;
+ }
+
+ /* choose which generic packet control to use */
+ switch (packet_index) {
+ case 0:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC0_CONT, cont,
+ HDMI_GENERIC0_SEND, send,
+ HDMI_GENERIC0_LINE, line);
+ break;
+ case 1:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC1_CONT, cont,
+ HDMI_GENERIC1_SEND, send,
+ HDMI_GENERIC1_LINE, line);
+ break;
+ case 2:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
+ HDMI_GENERIC0_CONT, cont,
+ HDMI_GENERIC0_SEND, send,
+ HDMI_GENERIC0_LINE, line);
+ break;
+ case 3:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
+ HDMI_GENERIC1_CONT, cont,
+ HDMI_GENERIC1_SEND, send,
+ HDMI_GENERIC1_LINE, line);
+ break;
+ case 4:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
+ HDMI_GENERIC0_CONT, cont,
+ HDMI_GENERIC0_SEND, send,
+ HDMI_GENERIC0_LINE, line);
+ break;
+ case 5:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
+ HDMI_GENERIC1_CONT, cont,
+ HDMI_GENERIC1_SEND, send,
+ HDMI_GENERIC1_LINE, line);
+ break;
+ case 6:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3,
+ HDMI_GENERIC0_CONT, cont,
+ HDMI_GENERIC0_SEND, send,
+ HDMI_GENERIC0_LINE, line);
+ break;
+ case 7:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3,
+ HDMI_GENERIC1_CONT, cont,
+ HDMI_GENERIC1_SEND, send,
+ HDMI_GENERIC1_LINE, line);
+ break;
+ default:
+ /* invalid HW packet index */
+ DC_LOG_WARNING(
+ "Invalid HW packet index: %s()\n",
+ __func__);
+ return;
+ }
+}
+
+/* setup stream encoder in dp mode */
+void enc1_stream_encoder_dp_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ enum dc_color_space output_color_space)
+{
+ uint32_t h_active_start;
+ uint32_t v_active_start;
+ uint32_t misc0 = 0;
+ uint32_t misc1 = 0;
+ uint32_t h_blank;
+ uint32_t h_back_porch;
+ uint8_t synchronous_clock = 0; /* asynchronous mode */
+ uint8_t colorimetry_bpc;
+ uint8_t dynamic_range_rgb = 0; /*full range*/
+ uint8_t dynamic_range_ycbcr = 1; /*bt709*/
+
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ REG_UPDATE(DP_DB_CNTL, DP_DB_DISABLE, 1);
+
+ /* set pixel encoding */
+ switch (crtc_timing->pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_TYPE_YCBCR422);
+ break;
+ case PIXEL_ENCODING_YCBCR444:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_TYPE_YCBCR444);
+
+ if (crtc_timing->flags.Y_ONLY)
+ if (crtc_timing->display_color_depth != COLOR_DEPTH_666)
+ /* HW testing only, no use case yet.
+ * Color depth of Y-only could be
+ * 8, 10, 12, 16 bits
+ */
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_TYPE_Y_ONLY);
+ /* Note: DP_MSA_MISC1 bit 7 is the indicator
+ * of Y-only mode.
+ * This bit is set in HW if register
+ * DP_PIXEL_ENCODING is programmed to 0x4
+ */
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_TYPE_YCBCR420);
+ REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
+ break;
+ default:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_TYPE_RGB444);
+ break;
+ }
+
+ misc1 = REG_READ(DP_MSA_MISC);
+
+ /* set color depth */
+
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ 0);
+ break;
+ case COLOR_DEPTH_888:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ DP_COMPONENT_PIXEL_DEPTH_8BPC);
+ break;
+ case COLOR_DEPTH_101010:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ DP_COMPONENT_PIXEL_DEPTH_10BPC);
+
+ break;
+ case COLOR_DEPTH_121212:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ DP_COMPONENT_PIXEL_DEPTH_12BPC);
+ break;
+ default:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ DP_COMPONENT_PIXEL_DEPTH_6BPC);
+ break;
+ }
+
+ /* set dynamic range and YCbCr range */
+
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ colorimetry_bpc = 0;
+ break;
+ case COLOR_DEPTH_888:
+ colorimetry_bpc = 1;
+ break;
+ case COLOR_DEPTH_101010:
+ colorimetry_bpc = 2;
+ break;
+ case COLOR_DEPTH_121212:
+ colorimetry_bpc = 3;
+ break;
+ default:
+ colorimetry_bpc = 0;
+ break;
+ }
+
+	misc0 = misc0 | synchronous_clock;
+	misc0 |= colorimetry_bpc << 5;
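+	/* MSA MISC0 layout used below (per the DP spec, for reference):
+	 * bit 0 = synchronous clock, bits 2:1 = component format
+	 * (00 RGB, 01 YCbCr422, 10 YCbCr444), bit 3 = CEA/limited range,
+	 * bit 4 = BT.601 vs BT.709 for YCbCr, bits 7:5 = colorimetry_bpc.
+	 */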
+
+ switch (output_color_space) {
+ case COLOR_SPACE_SRGB:
+ misc0 = misc0 | 0x0;
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ dynamic_range_rgb = 0; /*full range*/
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ misc0 = misc0 | 0x8; /* bit3=1 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ dynamic_range_rgb = 1; /*limited range*/
+ break;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ dynamic_range_ycbcr = 0; /*bt601*/
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
+ else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444)
+ misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ dynamic_range_ycbcr = 1; /*bt709*/
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
+ else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444)
+ misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
+ break;
+ case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ dynamic_range_rgb = 1; /*limited range*/
+ break;
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
+ case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_XR_RGB:
+ case COLOR_SPACE_MSREF_SCRGB:
+ case COLOR_SPACE_ADOBERGB:
+ case COLOR_SPACE_DCIP3:
+ case COLOR_SPACE_XV_YCC_709:
+ case COLOR_SPACE_XV_YCC_601:
+ case COLOR_SPACE_DISPLAYNATIVE:
+ case COLOR_SPACE_DOLBYVISION:
+ case COLOR_SPACE_APPCTRL:
+ case COLOR_SPACE_CUSTOMPOINTS:
+ case COLOR_SPACE_UNKNOWN:
+ /* do nothing */
+ break;
+ }
+
+ REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);
+ REG_WRITE(DP_MSA_MISC, misc1); /* MSA_MISC1 */
+
+	/* DCN-only registers;
+	 * dc_crtc_timing is a VESA DMT struct with data from the EDID
+	 */
+ REG_SET_2(DP_MSA_TIMING_PARAM1, 0,
+ DP_MSA_HTOTAL, crtc_timing->h_total,
+ DP_MSA_VTOTAL, crtc_timing->v_total);
+
+	/* calculate from VESA timing parameters;
+	 * h_active_start is relative to the leading edge of sync
+	 */
+
+ h_blank = crtc_timing->h_total - crtc_timing->h_border_left -
+ crtc_timing->h_addressable - crtc_timing->h_border_right;
+
+ h_back_porch = h_blank - crtc_timing->h_front_porch -
+ crtc_timing->h_sync_width;
+
+ /* start at beginning of left border */
+ h_active_start = crtc_timing->h_sync_width + h_back_porch;
+
+
+ v_active_start = crtc_timing->v_total - crtc_timing->v_border_top -
+ crtc_timing->v_addressable - crtc_timing->v_border_bottom -
+ crtc_timing->v_front_porch;
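+	/* Worked example (standard CEA 1080p timing, for illustration only):
+	 * h_total=2200, h_addressable=1920, borders=0, h_front_porch=88,
+	 * h_sync_width=44 gives h_blank=280, h_back_porch=148 and
+	 * h_active_start=192; v_total=1125, v_addressable=1080,
+	 * v_front_porch=4 gives v_active_start=41.
+	 */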
+
+
+	/* HSTART/VSTART are programmed to the beginning of the left/top border */
+ REG_SET_2(DP_MSA_TIMING_PARAM2, 0,
+ DP_MSA_HSTART, h_active_start,
+ DP_MSA_VSTART, v_active_start);
+
+ REG_SET_4(DP_MSA_TIMING_PARAM3, 0,
+ DP_MSA_HSYNCWIDTH,
+ crtc_timing->h_sync_width,
+ DP_MSA_HSYNCPOLARITY,
+ !crtc_timing->flags.HSYNC_POSITIVE_POLARITY,
+ DP_MSA_VSYNCWIDTH,
+ crtc_timing->v_sync_width,
+ DP_MSA_VSYNCPOLARITY,
+ !crtc_timing->flags.VSYNC_POSITIVE_POLARITY);
+
+	/* HWIDTH/VHEIGHT include border or overscan */
+ REG_SET_2(DP_MSA_TIMING_PARAM4, 0,
+ DP_MSA_HWIDTH, crtc_timing->h_border_left +
+ crtc_timing->h_addressable + crtc_timing->h_border_right,
+ DP_MSA_VHEIGHT, crtc_timing->v_border_top +
+ crtc_timing->v_addressable + crtc_timing->v_border_bottom);
+}
+
+static void enc1_stream_encoder_set_stream_attribute_helper(
+ struct dcn10_stream_encoder *enc1,
+ struct dc_crtc_timing *crtc_timing)
+{
+ switch (crtc_timing->pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 1);
+ break;
+ default:
+ REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 0);
+ break;
+ }
+ REG_UPDATE(DIG_FE_CNTL, TMDS_COLOR_FORMAT, 0);
+}
+
+/* setup stream encoder in hdmi mode */
+void enc1_stream_encoder_hdmi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ int actual_pix_clk_khz,
+ bool enable_audio)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ struct bp_encoder_control cntl = {0};
+
+ cntl.action = ENCODER_CONTROL_SETUP;
+ cntl.engine_id = enc1->base.id;
+ cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ cntl.enable_dp_audio = enable_audio;
+ cntl.pixel_clock = actual_pix_clk_khz;
+ cntl.lanes_number = LANE_COUNT_FOUR;
+
+ if (enc1->base.bp->funcs->encoder_control(
+ enc1->base.bp, &cntl) != BP_RESULT_OK)
+ return;
+
+ enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
+
+ /* setup HDMI engine */
+ REG_UPDATE_5(HDMI_CONTROL,
+ HDMI_PACKET_GEN_VERSION, 1,
+ HDMI_KEEPOUT_MODE, 1,
+ HDMI_DEEP_COLOR_ENABLE, 0,
+ HDMI_DATA_SCRAMBLE_EN, 0,
+ HDMI_CLOCK_CHANNEL_RATE, 0);
+
+
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
+ break;
+ case COLOR_DEPTH_101010:
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 1,
+ HDMI_DEEP_COLOR_ENABLE, 0);
+ } else {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 1,
+ HDMI_DEEP_COLOR_ENABLE, 1);
+ }
+ break;
+ case COLOR_DEPTH_121212:
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 2,
+ HDMI_DEEP_COLOR_ENABLE, 0);
+ } else {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 2,
+ HDMI_DEEP_COLOR_ENABLE, 1);
+ }
+ break;
+ case COLOR_DEPTH_161616:
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 3,
+ HDMI_DEEP_COLOR_ENABLE, 1);
+ break;
+ default:
+ break;
+ }
+
+ if (actual_pix_clk_khz >= HDMI_CLOCK_CHANNEL_RATE_MORE_340M) {
+ /* enable HDMI data scrambler
+ * HDMI_CLOCK_CHANNEL_RATE_MORE_340M
+ * Clock channel frequency is 1/4 of character rate.
+ */
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DATA_SCRAMBLE_EN, 1,
+ HDMI_CLOCK_CHANNEL_RATE, 1);
+ } else if (crtc_timing->flags.LTE_340MCSC_SCRAMBLE) {
+
+ /* TODO: New feature for DCE11, still need to implement */
+
+ /* enable HDMI data scrambler
+ * HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE
+ * Clock channel frequency is the same
+ * as character rate
+ */
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DATA_SCRAMBLE_EN, 1,
+ HDMI_CLOCK_CHANNEL_RATE, 0);
+ }
+
+
+ REG_UPDATE_3(HDMI_VBI_PACKET_CONTROL,
+ HDMI_GC_CONT, 1,
+ HDMI_GC_SEND, 1,
+ HDMI_NULL_SEND, 1);
+
+	/* the following registers belong to audio */
+ REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
+
+ REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+
+ REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE,
+ VBI_LINE_0 + 2);
+
+ REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0);
+}
+
+/* setup stream encoder in dvi mode */
+void enc1_stream_encoder_dvi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ bool is_dual_link)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ struct bp_encoder_control cntl = {0};
+
+ cntl.action = ENCODER_CONTROL_SETUP;
+ cntl.engine_id = enc1->base.id;
+ cntl.signal = is_dual_link ?
+ SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK;
+ cntl.enable_dp_audio = false;
+ cntl.pixel_clock = crtc_timing->pix_clk_khz;
+ cntl.lanes_number = (is_dual_link) ? LANE_COUNT_EIGHT : LANE_COUNT_FOUR;
+
+ if (enc1->base.bp->funcs->encoder_control(
+ enc1->base.bp, &cntl) != BP_RESULT_OK)
+ return;
+
+ ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB);
+ ASSERT(crtc_timing->display_color_depth == COLOR_DEPTH_888);
+ enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
+}
+
+void enc1_stream_encoder_set_mst_bandwidth(
+ struct stream_encoder *enc,
+ struct fixed31_32 avg_time_slots_per_mtp)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t x = dal_fixed31_32_floor(
+ avg_time_slots_per_mtp);
+ uint32_t y = dal_fixed31_32_ceil(
+ dal_fixed31_32_shl(
+ dal_fixed31_32_sub_int(
+ avg_time_slots_per_mtp,
+ x),
+ 26));
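+	/* x/y form a fixed-point VC payload rate: x is the integer part and
+	 * y the fraction scaled by 2^26. E.g. (illustrative) 3.5 time slots
+	 * per MTP gives x = 3 and y = ceil(0.5 * 2^26) = 33554432.
+	 */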
+
+ REG_SET_2(DP_MSE_RATE_CNTL, 0,
+ DP_MSE_RATE_X, x,
+ DP_MSE_RATE_Y, y);
+
+ /* wait for update to be completed on the link */
+ /* i.e. DP_MSE_RATE_UPDATE_PENDING field (read only) */
+ /* is reset to 0 (not pending) */
+ REG_WAIT(DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING,
+ 0,
+ 10, DP_MST_UPDATE_MAX_RETRY);
+}
+
+static void enc1_stream_encoder_update_hdmi_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+	/* TODO: for bring-up, disable the HDMI double buffer */
+ REG_UPDATE(HDMI_DB_CONTROL, HDMI_DB_DISABLE, 1);
+
+ enc1_update_hdmi_info_packet(enc1, 0, &info_frame->avi);
+ enc1_update_hdmi_info_packet(enc1, 1, &info_frame->vendor);
+ enc1_update_hdmi_info_packet(enc1, 2, &info_frame->gamut);
+ enc1_update_hdmi_info_packet(enc1, 3, &info_frame->spd);
+ enc1_update_hdmi_info_packet(enc1, 4, &info_frame->hdrsmd);
+}
+
+static void enc1_stream_encoder_stop_hdmi_info_packets(
+ struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ /* stop generic packets 0 & 1 on HDMI */
+ REG_SET_6(HDMI_GENERIC_PACKET_CONTROL0, 0,
+ HDMI_GENERIC1_CONT, 0,
+ HDMI_GENERIC1_LINE, 0,
+ HDMI_GENERIC1_SEND, 0,
+ HDMI_GENERIC0_CONT, 0,
+ HDMI_GENERIC0_LINE, 0,
+ HDMI_GENERIC0_SEND, 0);
+
+ /* stop generic packets 2 & 3 on HDMI */
+ REG_SET_6(HDMI_GENERIC_PACKET_CONTROL1, 0,
+ HDMI_GENERIC0_CONT, 0,
+ HDMI_GENERIC0_LINE, 0,
+ HDMI_GENERIC0_SEND, 0,
+ HDMI_GENERIC1_CONT, 0,
+ HDMI_GENERIC1_LINE, 0,
+ HDMI_GENERIC1_SEND, 0);
+
+	/* stop generic packets 4 & 5 on HDMI */
+ REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0,
+ HDMI_GENERIC0_CONT, 0,
+ HDMI_GENERIC0_LINE, 0,
+ HDMI_GENERIC0_SEND, 0,
+ HDMI_GENERIC1_CONT, 0,
+ HDMI_GENERIC1_LINE, 0,
+ HDMI_GENERIC1_SEND, 0);
+
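+	/* stop generic packets 6 & 7 on HDMI */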
+ REG_SET_6(HDMI_GENERIC_PACKET_CONTROL3, 0,
+ HDMI_GENERIC0_CONT, 0,
+ HDMI_GENERIC0_LINE, 0,
+ HDMI_GENERIC0_SEND, 0,
+ HDMI_GENERIC1_CONT, 0,
+ HDMI_GENERIC1_LINE, 0,
+ HDMI_GENERIC1_SEND, 0);
+}
+
+void enc1_stream_encoder_update_dp_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t value = 0;
+
+ if (info_frame->vsc.valid)
+ enc1_update_generic_info_packet(
+ enc1,
+ 0, /* packetIndex */
+ &info_frame->vsc);
+
+ if (info_frame->spd.valid)
+ enc1_update_generic_info_packet(
+ enc1,
+ 2, /* packetIndex */
+ &info_frame->spd);
+
+ if (info_frame->hdrsmd.valid)
+ enc1_update_generic_info_packet(
+ enc1,
+ 3, /* packetIndex */
+ &info_frame->hdrsmd);
+
+ /* enable/disable transmission of packet(s).
+ * If enabled, packet transmission begins on the next frame
+ */
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
+
+
+	/* This bit is the master enable bit.
+	 * When enabling the secondary stream engine,
+	 * this master bit must also be set.
+	 * This register is shared with the audio info frame.
+	 * Therefore we need to enable the master bit
+	 * if at least one of the fields is not 0.
+	 */
+ value = REG_READ(DP_SEC_CNTL);
+ if (value)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+}
+
+void enc1_stream_encoder_stop_dp_info_packets(
+ struct stream_encoder *enc)
+{
+ /* stop generic packets on DP */
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t value = 0;
+
+ REG_SET_10(DP_SEC_CNTL, 0,
+ DP_SEC_GSP0_ENABLE, 0,
+ DP_SEC_GSP1_ENABLE, 0,
+ DP_SEC_GSP2_ENABLE, 0,
+ DP_SEC_GSP3_ENABLE, 0,
+ DP_SEC_GSP4_ENABLE, 0,
+ DP_SEC_GSP5_ENABLE, 0,
+ DP_SEC_GSP6_ENABLE, 0,
+ DP_SEC_GSP7_ENABLE, 0,
+ DP_SEC_MPG_ENABLE, 0,
+ DP_SEC_STREAM_ENABLE, 0);
+
+	/* this register is shared with the audio info frame;
+	 * therefore we need to keep the master bit enabled
+	 * if at least one of the fields is not 0
+	 */
+ value = REG_READ(DP_SEC_CNTL);
+ if (value)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+
+}
+
+void enc1_stream_encoder_dp_blank(
+ struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t retries = 0;
+ uint32_t reg1 = 0;
+ uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
+
+ /* Note: For CZ, we are changing driver default to disable
+ * stream deferred to next VBLANK. If results are positive, we
+ * will make the same change to all DCE versions. There are a
+ * handful of panels that cannot handle disable stream at
+ * HBLANK and will result in a white line flash across the
+ * screen on stream disable.
+ */
+ REG_GET(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, &reg1);
+ if ((reg1 & 0x1) == 0)
+		/* stream not enabled */
+ return;
+ /* Specify the video stream disable point
+ * (2 = start of the next vertical blank)
+ */
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
+ /* Larger delay to wait until VBLANK - use max retry of
+ * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
+ * a little more because we may not trust delay accuracy.
+ */
+ max_retries = DP_BLANK_MAX_RETRY * 150;
+
+ /* disable DP stream */
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
+
+ /* the encoder stops sending the video stream
+ * at the start of the vertical blanking.
+ * Poll for DP_VID_STREAM_STATUS == 0
+ */
+
+ REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS,
+ 0,
+ 10, max_retries);
+
+ ASSERT(retries <= max_retries);
+
+ /* Tell the DP encoder to ignore timing from CRTC, must be done after
+ * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
+ * complete, stream status will be stuck in video stream enabled state,
+ * i.e. DP_VID_STREAM_STATUS stuck at 1.
+ */
+
+ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
+}
+
+/* output video stream to link encoder */
+void enc1_stream_encoder_dp_unblank(
+ struct stream_encoder *enc,
+ const struct encoder_unblank_param *param)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
+ uint32_t n_vid = 0x8000;
+ uint32_t m_vid;
+
+ /* M / N = Fstream / Flink
+ * m_vid / n_vid = pixel rate / link rate
+ */
+
+ uint64_t m_vid_l = n_vid;
+
+ m_vid_l *= param->pixel_clk_khz;
+ m_vid_l = div_u64(m_vid_l,
+ param->link_settings.link_rate
+ * LINK_RATE_REF_FREQ_IN_KHZ);
+
+ m_vid = (uint32_t) m_vid_l;
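+		/* Worked example (illustrative values, not from this patch):
+		 * pixel_clk_khz = 148500 on an HBR2 link (link_rate = 20,
+		 * i.e. 20 * 270 MHz = 5.4 GHz) gives
+		 * m_vid = 0x8000 * 148500 / (20 * 27000) = 9011.
+		 */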
+
+ /* enable auto measurement */
+
+ REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0);
+
+ /* auto measurement need 1 full 0x8000 symbol cycle to kick in,
+ * therefore program initial value for Mvid and Nvid
+ */
+
+ REG_UPDATE(DP_VID_N, DP_VID_N, n_vid);
+
+ REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);
+
+ REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 1);
+ }
+
+ /* set DIG_START to 0x1 to resync FIFO */
+
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+
+ /* switch DP encoder to CRTC data */
+
+ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
+
+ /* wait 100us for DIG/DP logic to prime
+ * (i.e. a few video lines)
+ */
+ udelay(100);
+
+ /* the hardware would start sending video at the start of the next DP
+ * frame (i.e. rising edge of the vblank).
+ * NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but this
+ * register has no effect on enable transition! HW always guarantees
+ * VID_STREAM enable at start of next frame, and this is not
+ * programmable
+ */
+
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+}
+
+void enc1_stream_encoder_set_avmute(
+ struct stream_encoder *enc,
+ bool enable)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ unsigned int value = enable ? 1 : 0;
+
+ REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, value);
+}
+
+
+#define DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT 0x8000
+#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC 1
+
+#include "include/audio_types.h"
+
+/**
+* speakers_to_channels
+*
+* @brief
+* translate speakers to channels
+*
+* FL - Front Left
+* FR - Front Right
+* RL - Rear Left
+* RR - Rear Right
+* RC - Rear Center
+* FC - Front Center
+* FLC - Front Left Center
+* FRC - Front Right Center
+* RLC - Rear Left Center
+* RRC - Rear Right Center
+* LFE - Low Freq Effect
+*
+* FC
+* FLC FRC
+* FL FR
+*
+* LFE
+* ()
+*
+*
+* RL RR
+* RLC RRC
+* RC
+*
+* ch 8 7 6 5 4 3 2 1
+* 0b00000011 - - - - - - FR FL
+* 0b00000111 - - - - - LFE FR FL
+* 0b00001011 - - - - FC - FR FL
+* 0b00001111 - - - - FC LFE FR FL
+* 0b00010011 - - - RC - - FR FL
+* 0b00010111 - - - RC - LFE FR FL
+* 0b00011011 - - - RC FC - FR FL
+* 0b00011111 - - - RC FC LFE FR FL
+* 0b00110011 - - RR RL - - FR FL
+* 0b00110111 - - RR RL - LFE FR FL
+* 0b00111011 - - RR RL FC - FR FL
+* 0b00111111 - - RR RL FC LFE FR FL
+* 0b01110011 - RC RR RL - - FR FL
+* 0b01110111 - RC RR RL - LFE FR FL
+* 0b01111011 - RC RR RL FC - FR FL
+* 0b01111111 - RC RR RL FC LFE FR FL
+* 0b11110011 RRC RLC RR RL - - FR FL
+* 0b11110111 RRC RLC RR RL - LFE FR FL
+* 0b11111011 RRC RLC RR RL FC - FR FL
+* 0b11111111 RRC RLC RR RL FC LFE FR FL
+* 0b11000011 FRC FLC - - - - FR FL
+* 0b11000111 FRC FLC - - - LFE FR FL
+* 0b11001011 FRC FLC - - FC - FR FL
+* 0b11001111 FRC FLC - - FC LFE FR FL
+* 0b11010011 FRC FLC - RC - - FR FL
+* 0b11010111 FRC FLC - RC - LFE FR FL
+* 0b11011011 FRC FLC - RC FC - FR FL
+* 0b11011111 FRC FLC - RC FC LFE FR FL
+* 0b11110011 FRC FLC RR RL - - FR FL
+* 0b11110111 FRC FLC RR RL - LFE FR FL
+* 0b11111011 FRC FLC RR RL FC - FR FL
+* 0b11111111 FRC FLC RR RL FC LFE FR FL
+*
+* @param
+* speakers - speaker information as it comes from CEA audio block
+*/
+
+union audio_cea_channels {
+ uint8_t all;
+ struct audio_cea_channels_bits {
+ uint32_t FL:1;
+ uint32_t FR:1;
+ uint32_t LFE:1;
+ uint32_t FC:1;
+ uint32_t RL_RC:1;
+ uint32_t RR:1;
+ uint32_t RC_RLC_FLC:1;
+ uint32_t RRC_FRC:1;
+ } channels;
+};
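+/* Bit order follows the CEA-861 speaker allocation table above:
+ * bit 0 = FL, bit 1 = FR, bit 2 = LFE, bit 3 = FC, then the shared
+ * rear/center bits whose meaning depends on which speakers are present.
+ */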
+
+struct audio_clock_info {
+ /* pixel clock frequency*/
+ uint32_t pixel_clock_in_10khz;
+ /* N - 32KHz audio */
+ uint32_t n_32khz;
+ /* CTS - 32KHz audio*/
+ uint32_t cts_32khz;
+ uint32_t n_44khz;
+ uint32_t cts_44khz;
+ uint32_t n_48khz;
+ uint32_t cts_48khz;
+};
+
+/* Pixel clocks (in 10 kHz units, first table column) of the rows below:
+ * 25.2MHz/1.001 (x2), 25.2MHz, 27MHz, 27MHz*1.001 (x2), 54MHz,
+ * 54MHz*1.001, 74.25MHz/1.001, 74.25MHz, 148.5MHz/1.001, 148.5MHz, ...
+ */
+
+static const struct audio_clock_info audio_clock_info_table[16] = {
+ {2517, 4576, 28125, 7007, 31250, 6864, 28125},
+ {2518, 4576, 28125, 7007, 31250, 6864, 28125},
+ {2520, 4096, 25200, 6272, 28000, 6144, 25200},
+ {2700, 4096, 27000, 6272, 30000, 6144, 27000},
+ {2702, 4096, 27027, 6272, 30030, 6144, 27027},
+ {2703, 4096, 27027, 6272, 30030, 6144, 27027},
+ {5400, 4096, 54000, 6272, 60000, 6144, 54000},
+ {5405, 4096, 54054, 6272, 60060, 6144, 54054},
+ {7417, 11648, 210937, 17836, 234375, 11648, 140625},
+ {7425, 4096, 74250, 6272, 82500, 6144, 74250},
+ {14835, 11648, 421875, 8918, 234375, 5824, 140625},
+ {14850, 4096, 148500, 6272, 165000, 6144, 148500},
+ {29670, 5824, 421875, 4459, 234375, 5824, 281250},
+ {29700, 3072, 222750, 4704, 247500, 5120, 247500},
+ {59340, 5824, 843750, 8918, 937500, 5824, 562500},
+ {59400, 3072, 445500, 9408, 990000, 6144, 594000}
+};
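+/* The N/CTS pairs above satisfy the HDMI audio clock regeneration
+ * relationship 128 * fs = f_pixel * N / CTS. Spot check (illustrative),
+ * 25.2 MHz row at 32 kHz: 25200000 * 4096 / 25200 = 4096000 Hz
+ * = 128 * 32000.
+ */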
+
+static const struct audio_clock_info audio_clock_info_table_36bpc[14] = {
+ {2517, 9152, 84375, 7007, 48875, 9152, 56250},
+ {2518, 9152, 84375, 7007, 48875, 9152, 56250},
+ {2520, 4096, 37800, 6272, 42000, 6144, 37800},
+ {2700, 4096, 40500, 6272, 45000, 6144, 40500},
+ {2702, 8192, 81081, 6272, 45045, 8192, 54054},
+ {2703, 8192, 81081, 6272, 45045, 8192, 54054},
+ {5400, 4096, 81000, 6272, 90000, 6144, 81000},
+ {5405, 4096, 81081, 6272, 90090, 6144, 81081},
+ {7417, 11648, 316406, 17836, 351562, 11648, 210937},
+ {7425, 4096, 111375, 6272, 123750, 6144, 111375},
+ {14835, 11648, 632812, 17836, 703125, 11648, 421875},
+ {14850, 4096, 222750, 6272, 247500, 6144, 222750},
+ {29670, 5824, 632812, 8918, 703125, 5824, 421875},
+ {29700, 4096, 445500, 4704, 371250, 5120, 371250}
+};
+
+static const struct audio_clock_info audio_clock_info_table_48bpc[14] = {
+ {2517, 4576, 56250, 7007, 62500, 6864, 56250},
+ {2518, 4576, 56250, 7007, 62500, 6864, 56250},
+ {2520, 4096, 50400, 6272, 56000, 6144, 50400},
+ {2700, 4096, 54000, 6272, 60000, 6144, 54000},
+ {2702, 4096, 54054, 6267, 60060, 8192, 54054},
+ {2703, 4096, 54054, 6272, 60060, 8192, 54054},
+ {5400, 4096, 108000, 6272, 120000, 6144, 108000},
+ {5405, 4096, 108108, 6272, 120120, 6144, 108108},
+ {7417, 11648, 421875, 17836, 468750, 11648, 281250},
+ {7425, 4096, 148500, 6272, 165000, 6144, 148500},
+ {14835, 11648, 843750, 8918, 468750, 11648, 281250},
+ {14850, 4096, 297000, 6272, 330000, 6144, 297000},
+ {29670, 5824, 843750, 4459, 468750, 5824, 562500},
+	{29700, 3072, 445500, 4704, 495000, 5120, 495000}
+};
+
+static union audio_cea_channels speakers_to_channels(
+ struct audio_speaker_flags speaker_flags)
+{
+ union audio_cea_channels cea_channels = {0};
+
+ /* these are one to one */
+ cea_channels.channels.FL = speaker_flags.FL_FR;
+ cea_channels.channels.FR = speaker_flags.FL_FR;
+ cea_channels.channels.LFE = speaker_flags.LFE;
+ cea_channels.channels.FC = speaker_flags.FC;
+
+ /* if Rear Left and Right exist move RC speaker to channel 7
+ * otherwise to channel 5
+ */
+ if (speaker_flags.RL_RR) {
+ cea_channels.channels.RL_RC = speaker_flags.RL_RR;
+ cea_channels.channels.RR = speaker_flags.RL_RR;
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.RC;
+ } else {
+ cea_channels.channels.RL_RC = speaker_flags.RC;
+ }
+
+ /* FRONT Left Right Center and REAR Left Right Center are exclusive */
+ if (speaker_flags.FLC_FRC) {
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC;
+ cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC;
+ } else {
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC;
+ cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC;
+ }
+
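+	/* Example (illustrative): FL_FR=1, LFE=1, FC=1, RL_RR=1 yields
+	 * cea_channels.all = 0x3f, matching the 0b00111111 row in the
+	 * mapping table above.
+	 */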
+ return cea_channels;
+}
+
+static uint32_t calc_max_audio_packets_per_line(
+ const struct audio_crtc_info *crtc_info)
+{
+ uint32_t max_packets_per_line;
+
+ max_packets_per_line =
+ crtc_info->h_total - crtc_info->h_active;
+
+ if (crtc_info->pixel_repetition)
+ max_packets_per_line *= crtc_info->pixel_repetition;
+
+ /* for other hdmi features */
+ max_packets_per_line -= 58;
+ /* for Control Period */
+ max_packets_per_line -= 16;
+ /* Number of Audio Packets per Line */
+ max_packets_per_line /= 32;
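+	/* e.g. (illustrative) 1080p: h_total=2200, h_active=1920 gives
+	 * (280 - 58 - 16) / 32 = 6 audio packets per line
+	 */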
+
+ return max_packets_per_line;
+}
+
+static void get_audio_clock_info(
+ enum dc_color_depth color_depth,
+ uint32_t crtc_pixel_clock_in_khz,
+ uint32_t actual_pixel_clock_in_khz,
+ struct audio_clock_info *audio_clock_info)
+{
+ const struct audio_clock_info *clock_info;
+ uint32_t index;
+ uint32_t crtc_pixel_clock_in_10khz = crtc_pixel_clock_in_khz / 10;
+ uint32_t audio_array_size;
+
+ switch (color_depth) {
+ case COLOR_DEPTH_161616:
+ clock_info = audio_clock_info_table_48bpc;
+ audio_array_size = ARRAY_SIZE(
+ audio_clock_info_table_48bpc);
+ break;
+ case COLOR_DEPTH_121212:
+ clock_info = audio_clock_info_table_36bpc;
+ audio_array_size = ARRAY_SIZE(
+ audio_clock_info_table_36bpc);
+ break;
+ default:
+ clock_info = audio_clock_info_table;
+ audio_array_size = ARRAY_SIZE(
+ audio_clock_info_table);
+ break;
+ }
+
+ if (clock_info != NULL) {
+ /* search for exact pixel clock in table */
+ for (index = 0; index < audio_array_size; index++) {
+ if (clock_info[index].pixel_clock_in_10khz >
+ crtc_pixel_clock_in_10khz)
+ break; /* not match */
+ else if (clock_info[index].pixel_clock_in_10khz ==
+ crtc_pixel_clock_in_10khz) {
+ /* match found */
+ *audio_clock_info = clock_info[index];
+ return;
+ }
+ }
+ }
+
+ /* not found */
+ if (actual_pixel_clock_in_khz == 0)
+ actual_pixel_clock_in_khz = crtc_pixel_clock_in_khz;
+
+	/* See the HDMI spec, table entry under
+	 * pixel clock "Other".
+	 */
+ audio_clock_info->pixel_clock_in_10khz =
+ actual_pixel_clock_in_khz / 10;
+ audio_clock_info->cts_32khz = actual_pixel_clock_in_khz;
+ audio_clock_info->cts_44khz = actual_pixel_clock_in_khz;
+ audio_clock_info->cts_48khz = actual_pixel_clock_in_khz;
+
+ audio_clock_info->n_32khz = 4096;
+ audio_clock_info->n_44khz = 6272;
+ audio_clock_info->n_48khz = 6144;
+}
+
+static void enc1_se_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *audio_info)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ uint32_t speakers = 0;
+ uint32_t channels = 0;
+
+ ASSERT(audio_info);
+ if (audio_info == NULL)
+		/* this should not happen; bail out so we don't crash if it does */
+ return;
+
+ speakers = audio_info->flags.info.ALLSPEAKERS;
+ channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
+
+ /* setup the audio stream source select (audio -> dig mapping) */
+ REG_SET(AFMT_AUDIO_SRC_CONTROL, 0, AFMT_AUDIO_SRC_SELECT, az_inst);
+
+ /* Channel allocation */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, channels);
+}
+
+static void enc1_se_setup_hdmi_audio(
+ struct stream_encoder *enc,
+ const struct audio_crtc_info *crtc_info)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ struct audio_clock_info audio_clock_info = {0};
+ uint32_t max_packets_per_line;
+
+	/* For now still do the calculation, although this field is ignored
+	 * when HDMI_PACKET_GEN_VERSION is set to 1 above
+	 */
+ max_packets_per_line = calc_max_audio_packets_per_line(crtc_info);
+
+ /* HDMI_AUDIO_PACKET_CONTROL */
+ REG_UPDATE_2(HDMI_AUDIO_PACKET_CONTROL,
+ HDMI_AUDIO_PACKETS_PER_LINE, max_packets_per_line,
+ HDMI_AUDIO_DELAY_EN, 1);
+
+ /* AFMT_AUDIO_PACKET_CONTROL */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+
+ /* AFMT_AUDIO_PACKET_CONTROL2 */
+ REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
+ AFMT_AUDIO_LAYOUT_OVRD, 0,
+ AFMT_60958_OSF_OVRD, 0);
+
+ /* HDMI_ACR_PACKET_CONTROL */
+ REG_UPDATE_3(HDMI_ACR_PACKET_CONTROL,
+ HDMI_ACR_AUTO_SEND, 1,
+ HDMI_ACR_SOURCE, 0,
+ HDMI_ACR_AUDIO_PRIORITY, 0);
+
+ /* Program audio clock sample/regeneration parameters */
+ get_audio_clock_info(crtc_info->color_depth,
+ crtc_info->requested_pixel_clock,
+ crtc_info->calculated_pixel_clock,
+ &audio_clock_info);
+	DC_LOG_HW_AUDIO(
+		"\n%s: Input: requested_pixel_clock = %d, "
+		"calculated_pixel_clock = %d\n", __func__,
+		crtc_info->requested_pixel_clock,
+		crtc_info->calculated_pixel_clock);
+
+ /* HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK */
+ REG_UPDATE(HDMI_ACR_32_0, HDMI_ACR_CTS_32, audio_clock_info.cts_32khz);
+
+ /* HDMI_ACR_32_1__HDMI_ACR_N_32_MASK */
+ REG_UPDATE(HDMI_ACR_32_1, HDMI_ACR_N_32, audio_clock_info.n_32khz);
+
+ /* HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK */
+ REG_UPDATE(HDMI_ACR_44_0, HDMI_ACR_CTS_44, audio_clock_info.cts_44khz);
+
+ /* HDMI_ACR_44_1__HDMI_ACR_N_44_MASK */
+ REG_UPDATE(HDMI_ACR_44_1, HDMI_ACR_N_44, audio_clock_info.n_44khz);
+
+ /* HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK */
+ REG_UPDATE(HDMI_ACR_48_0, HDMI_ACR_CTS_48, audio_clock_info.cts_48khz);
+
+ /* HDMI_ACR_48_1__HDMI_ACR_N_48_MASK */
+ REG_UPDATE(HDMI_ACR_48_1, HDMI_ACR_N_48, audio_clock_info.n_48khz);
+
+	/* The video driver cannot know in advance which sample rate will
+	 * be used by the HD Audio driver;
+	 * the HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE field is
+	 * programmed later in the interrupt callback
+	 */
+
+ /* AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK &
+ * AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK
+ */
+ REG_UPDATE_2(AFMT_60958_0,
+ AFMT_60958_CS_CHANNEL_NUMBER_L, 1,
+ AFMT_60958_CS_CLOCK_ACCURACY, 0);
+
+	/* AFMT_60958_1 AFMT_60958_CS_CHANNEL_NUMBER_R */
+ REG_UPDATE(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
+
+	/* AFMT_60958_2: keep these settings until the
+	 * programming guide comes out
+	 */
+ REG_UPDATE_6(AFMT_60958_2,
+ AFMT_60958_CS_CHANNEL_NUMBER_2, 3,
+ AFMT_60958_CS_CHANNEL_NUMBER_3, 4,
+ AFMT_60958_CS_CHANNEL_NUMBER_4, 5,
+ AFMT_60958_CS_CHANNEL_NUMBER_5, 6,
+ AFMT_60958_CS_CHANNEL_NUMBER_6, 7,
+ AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
+}
+
+static void enc1_se_setup_dp_audio(
+ struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ /* --- DP Audio packet configurations --- */
+
+ /* ATP Configuration */
+ REG_SET(DP_SEC_AUD_N, 0,
+ DP_SEC_AUD_N, DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT);
+
+ /* Async/auto-calc timestamp mode */
+ REG_SET(DP_SEC_TIMESTAMP, 0, DP_SEC_TIMESTAMP_MODE,
+ DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC);
+
+ /* --- The following are the registers
+ * copied from the SetupHDMI ---
+ */
+
+ /* AFMT_AUDIO_PACKET_CONTROL */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+
+ /* AFMT_AUDIO_PACKET_CONTROL2 */
+ /* Program the ATP and AIP next */
+ REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
+ AFMT_AUDIO_LAYOUT_OVRD, 0,
+ AFMT_60958_OSF_OVRD, 0);
+
+ /* AFMT_INFOFRAME_CONTROL0 */
+ REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+
+ /* AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */
+ REG_UPDATE(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, 0);
+}
+
+static void enc1_se_enable_audio_clock(
+ struct stream_encoder *enc,
+ bool enable)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (REG(AFMT_CNTL) == 0)
+ return; /* DCE8/10 does not have this register */
+
+ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, !!enable);
+
+	/* wait for the AFMT clock to turn on;
+	 * expectation: this should complete in 1-2 reads
+	 *
+	 * REG_WAIT(AFMT_CNTL, AFMT_AUDIO_CLOCK_ON, !!enable, 1, 10);
+	 *
+	 * TODO: waiting for clock_on does not work well; may need a HW
+	 * programming sequence. But audio seems to work normally even
+	 * without waiting for the clock_on status change.
+	 */
+}
+
+static void enc1_se_enable_dp_audio(
+ struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ /* Enable Audio packets */
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
+
+ /* Program the ATP and AIP next */
+ REG_UPDATE_2(DP_SEC_CNTL,
+ DP_SEC_ATP_ENABLE, 1,
+ DP_SEC_AIP_ENABLE, 1);
+
+ /* Program STREAM_ENABLE after all the other enables. */
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+}
+
+static void enc1_se_disable_dp_audio(
+ struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t value = 0;
+
+ /* Disable Audio packets */
+ REG_UPDATE_5(DP_SEC_CNTL,
+ DP_SEC_ASP_ENABLE, 0,
+ DP_SEC_ATP_ENABLE, 0,
+ DP_SEC_AIP_ENABLE, 0,
+ DP_SEC_ACM_ENABLE, 0,
+ DP_SEC_STREAM_ENABLE, 0);
+
+	/* This register is shared with the encoder info frame. Therefore we
+	 * need to keep the master bit enabled if at least one of the fields
+	 * is not 0.
+	 */
+ value = REG_READ(DP_SEC_CNTL);
+ if (value != 0)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+
+}
+
+void enc1_se_audio_mute_control(
+ struct stream_encoder *enc,
+ bool mute)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, !mute);
+}
+
+void enc1_se_dp_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info)
+{
+ enc1_se_audio_setup(enc, az_inst, info);
+}
+
+void enc1_se_dp_audio_enable(
+ struct stream_encoder *enc)
+{
+ enc1_se_enable_audio_clock(enc, true);
+ enc1_se_setup_dp_audio(enc);
+ enc1_se_enable_dp_audio(enc);
+}
+
+void enc1_se_dp_audio_disable(
+ struct stream_encoder *enc)
+{
+ enc1_se_disable_dp_audio(enc);
+ enc1_se_enable_audio_clock(enc, false);
+}
+
+void enc1_se_hdmi_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info,
+ struct audio_crtc_info *audio_crtc_info)
+{
+ enc1_se_enable_audio_clock(enc, true);
+ enc1_se_setup_hdmi_audio(enc, audio_crtc_info);
+ enc1_se_audio_setup(enc, az_inst, info);
+}
+
+void enc1_se_hdmi_audio_disable(
+ struct stream_encoder *enc)
+{
+ enc1_se_enable_audio_clock(enc, false);
+}
+
+
+void enc1_setup_stereo_sync(
+ struct stream_encoder *enc,
+ int tg_inst, bool enable)
+{
+	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+	REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, tg_inst);
+ REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, !enable);
+}
+
+
+static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
+ .dp_set_stream_attribute =
+ enc1_stream_encoder_dp_set_stream_attribute,
+ .hdmi_set_stream_attribute =
+ enc1_stream_encoder_hdmi_set_stream_attribute,
+ .dvi_set_stream_attribute =
+ enc1_stream_encoder_dvi_set_stream_attribute,
+ .set_mst_bandwidth =
+ enc1_stream_encoder_set_mst_bandwidth,
+ .update_hdmi_info_packets =
+ enc1_stream_encoder_update_hdmi_info_packets,
+ .stop_hdmi_info_packets =
+ enc1_stream_encoder_stop_hdmi_info_packets,
+ .update_dp_info_packets =
+ enc1_stream_encoder_update_dp_info_packets,
+ .stop_dp_info_packets =
+ enc1_stream_encoder_stop_dp_info_packets,
+ .dp_blank =
+ enc1_stream_encoder_dp_blank,
+ .dp_unblank =
+ enc1_stream_encoder_dp_unblank,
+ .audio_mute_control = enc1_se_audio_mute_control,
+
+ .dp_audio_setup = enc1_se_dp_audio_setup,
+ .dp_audio_enable = enc1_se_dp_audio_enable,
+ .dp_audio_disable = enc1_se_dp_audio_disable,
+
+ .hdmi_audio_setup = enc1_se_hdmi_audio_setup,
+ .hdmi_audio_disable = enc1_se_hdmi_audio_disable,
+ .setup_stereo_sync = enc1_setup_stereo_sync,
+ .set_avmute = enc1_stream_encoder_set_avmute,
+};
+
+void dcn10_stream_encoder_construct(
+ struct dcn10_stream_encoder *enc1,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id,
+ const struct dcn10_stream_enc_registers *regs,
+ const struct dcn10_stream_encoder_shift *se_shift,
+ const struct dcn10_stream_encoder_mask *se_mask)
+{
+ enc1->base.funcs = &dcn10_str_enc_funcs;
+ enc1->base.ctx = ctx;
+ enc1->base.id = eng_id;
+ enc1->base.bp = bp;
+ enc1->regs = regs;
+ enc1->se_shift = se_shift;
+ enc1->se_mask = se_mask;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
new file mode 100644
index 0000000..6b3e4de
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -0,0 +1,524 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_STREAM_ENCODER_DCN10_H__
+#define __DC_STREAM_ENCODER_DCN10_H__
+
+#include "stream_encoder.h"
+
+#define DCN10STRENC_FROM_STRENC(stream_encoder)\
+ container_of(stream_encoder, struct dcn10_stream_encoder, base)
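+/* container_of() recovers the wrapping dcn10_stream_encoder from a
+ * pointer to its embedded stream_encoder base member.
+ */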
+
+#define SE_COMMON_DCN_REG_LIST(id) \
+ SRI(AFMT_CNTL, DIG, id), \
+ SRI(AFMT_GENERIC_0, DIG, id), \
+ SRI(AFMT_GENERIC_1, DIG, id), \
+ SRI(AFMT_GENERIC_2, DIG, id), \
+ SRI(AFMT_GENERIC_3, DIG, id), \
+ SRI(AFMT_GENERIC_4, DIG, id), \
+ SRI(AFMT_GENERIC_5, DIG, id), \
+ SRI(AFMT_GENERIC_6, DIG, id), \
+ SRI(AFMT_GENERIC_7, DIG, id), \
+ SRI(AFMT_GENERIC_HDR, DIG, id), \
+ SRI(AFMT_INFOFRAME_CONTROL0, DIG, id), \
+ SRI(AFMT_VBI_PACKET_CONTROL, DIG, id), \
+ SRI(AFMT_VBI_PACKET_CONTROL1, DIG, id), \
+ SRI(AFMT_AUDIO_PACKET_CONTROL, DIG, id), \
+ SRI(AFMT_AUDIO_PACKET_CONTROL2, DIG, id), \
+ SRI(AFMT_AUDIO_SRC_CONTROL, DIG, id), \
+ SRI(AFMT_60958_0, DIG, id), \
+ SRI(AFMT_60958_1, DIG, id), \
+ SRI(AFMT_60958_2, DIG, id), \
+ SRI(DIG_FE_CNTL, DIG, id), \
+ SRI(HDMI_CONTROL, DIG, id), \
+ SRI(HDMI_DB_CONTROL, DIG, id), \
+ SRI(HDMI_GC, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \
+ SRI(HDMI_INFOFRAME_CONTROL0, DIG, id), \
+ SRI(HDMI_INFOFRAME_CONTROL1, DIG, id), \
+ SRI(HDMI_VBI_PACKET_CONTROL, DIG, id), \
+ SRI(HDMI_AUDIO_PACKET_CONTROL, DIG, id),\
+ SRI(HDMI_ACR_PACKET_CONTROL, DIG, id),\
+ SRI(HDMI_ACR_32_0, DIG, id),\
+ SRI(HDMI_ACR_32_1, DIG, id),\
+ SRI(HDMI_ACR_44_0, DIG, id),\
+ SRI(HDMI_ACR_44_1, DIG, id),\
+ SRI(HDMI_ACR_48_0, DIG, id),\
+ SRI(HDMI_ACR_48_1, DIG, id),\
+ SRI(DP_DB_CNTL, DP, id), \
+ SRI(DP_MSA_MISC, DP, id), \
+ SRI(DP_MSA_COLORIMETRY, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM1, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM2, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM3, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM4, DP, id), \
+ SRI(DP_MSE_RATE_CNTL, DP, id), \
+ SRI(DP_MSE_RATE_UPDATE, DP, id), \
+ SRI(DP_PIXEL_FORMAT, DP, id), \
+ SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_STEER_FIFO, DP, id), \
+ SRI(DP_VID_M, DP, id), \
+ SRI(DP_VID_N, DP, id), \
+ SRI(DP_VID_STREAM_CNTL, DP, id), \
+ SRI(DP_VID_TIMING, DP, id), \
+ SRI(DP_SEC_AUD_N, DP, id), \
+ SRI(DP_SEC_TIMESTAMP, DP, id)
+
+#define SE_DCN_REG_LIST(id)\
+ SE_COMMON_DCN_REG_LIST(id)
+
+
+struct dcn10_stream_enc_registers {
+ uint32_t AFMT_CNTL;
+ uint32_t AFMT_AVI_INFO0;
+ uint32_t AFMT_AVI_INFO1;
+ uint32_t AFMT_AVI_INFO2;
+ uint32_t AFMT_AVI_INFO3;
+ uint32_t AFMT_GENERIC_0;
+ uint32_t AFMT_GENERIC_1;
+ uint32_t AFMT_GENERIC_2;
+ uint32_t AFMT_GENERIC_3;
+ uint32_t AFMT_GENERIC_4;
+ uint32_t AFMT_GENERIC_5;
+ uint32_t AFMT_GENERIC_6;
+ uint32_t AFMT_GENERIC_7;
+ uint32_t AFMT_GENERIC_HDR;
+ uint32_t AFMT_INFOFRAME_CONTROL0;
+ uint32_t AFMT_VBI_PACKET_CONTROL;
+ uint32_t AFMT_VBI_PACKET_CONTROL1;
+ uint32_t AFMT_AUDIO_PACKET_CONTROL;
+ uint32_t AFMT_AUDIO_PACKET_CONTROL2;
+ uint32_t AFMT_AUDIO_SRC_CONTROL;
+ uint32_t AFMT_60958_0;
+ uint32_t AFMT_60958_1;
+ uint32_t AFMT_60958_2;
+ uint32_t DIG_FE_CNTL;
+ uint32_t DP_MSE_RATE_CNTL;
+ uint32_t DP_MSE_RATE_UPDATE;
+ uint32_t DP_PIXEL_FORMAT;
+ uint32_t DP_SEC_CNTL;
+ uint32_t DP_STEER_FIFO;
+ uint32_t DP_VID_M;
+ uint32_t DP_VID_N;
+ uint32_t DP_VID_STREAM_CNTL;
+ uint32_t DP_VID_TIMING;
+ uint32_t DP_SEC_AUD_N;
+ uint32_t DP_SEC_TIMESTAMP;
+ uint32_t HDMI_CONTROL;
+ uint32_t HDMI_GC;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL0;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL1;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL2;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL3;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL4;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL5;
+ uint32_t HDMI_INFOFRAME_CONTROL0;
+ uint32_t HDMI_INFOFRAME_CONTROL1;
+ uint32_t HDMI_VBI_PACKET_CONTROL;
+ uint32_t HDMI_AUDIO_PACKET_CONTROL;
+ uint32_t HDMI_ACR_PACKET_CONTROL;
+ uint32_t HDMI_ACR_32_0;
+ uint32_t HDMI_ACR_32_1;
+ uint32_t HDMI_ACR_44_0;
+ uint32_t HDMI_ACR_44_1;
+ uint32_t HDMI_ACR_48_0;
+ uint32_t HDMI_ACR_48_1;
+ uint32_t DP_DB_CNTL;
+ uint32_t DP_MSA_MISC;
+ uint32_t DP_MSA_COLORIMETRY;
+ uint32_t DP_MSA_TIMING_PARAM1;
+ uint32_t DP_MSA_TIMING_PARAM2;
+ uint32_t DP_MSA_TIMING_PARAM3;
+ uint32_t DP_MSA_TIMING_PARAM4;
+ uint32_t HDMI_DB_CONTROL;
+};
+
+
+#define SE_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
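+/* SE_SF pastes register and field names together; e.g. (illustrative)
+ * SE_SF(DIG0_HDMI_GC, HDMI_GC_AVMUTE, __SHIFT) initializes
+ * .HDMI_GC_AVMUTE = DIG0_HDMI_GC__HDMI_GC_AVMUTE__SHIFT.
+ */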
+
+#define SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh)\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, mask_sh),\
+ SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB0, mask_sh),\
+ SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB1, mask_sh),\
+ SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB2, mask_sh),\
+ SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB3, mask_sh),\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_KEEPOUT_MODE, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
+ SE_SF(DIG0_AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
+ SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_X, mask_sh),\
+ SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_Y, mask_sh),\
+ SE_SF(DP0_DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP1_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_MPG_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, mask_sh),\
+ SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, mask_sh),\
+ SE_SF(DP0_DP_STEER_FIFO, DP_STEER_FIFO_RESET, mask_sh),\
+ SE_SF(DP0_DP_VID_TIMING, DP_VID_M_N_GEN_EN, mask_sh),\
+ SE_SF(DP0_DP_VID_N, DP_VID_N, mask_sh),\
+ SE_SF(DP0_DP_VID_M, DP_VID_M, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_START, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, mask_sh),\
+ SE_SF(DIG0_HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_60958_OSF_OVRD, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUDIO_PRIORITY, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_32_0, HDMI_ACR_CTS_32, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_32_1, HDMI_ACR_N_32, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_44_0, HDMI_ACR_CTS_44, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_44_1, HDMI_ACR_N_44, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_48_0, HDMI_ACR_CTS_48, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_48_1, HDMI_ACR_N_48, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\
+ SE_SF(DP0_DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\
+ SE_SF(DP0_DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh),\
+ SE_SF(DIG0_AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_DB_CNTL, DP_DB_DISABLE, mask_sh),\
+ SE_SF(DP0_DP_MSA_COLORIMETRY, DP_MSA_MISC0, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_HTOTAL, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_VTOTAL, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_HSTART, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_VSTART, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCWIDTH, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCPOLARITY, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCWIDTH, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCPOLARITY, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_HWIDTH, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_VHEIGHT, mask_sh),\
+ SE_SF(DIG0_HDMI_DB_CONTROL, HDMI_DB_DISABLE, mask_sh),\
+ SE_SF(DP0_DP_VID_TIMING, DP_VID_N_MUL, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_SOC(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCN10(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_SOC(mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_LINE, mask_sh)
+
+
+#define SE_REG_FIELD_LIST_DCN1_0(type) \
+ type AFMT_GENERIC_INDEX;\
+ type AFMT_GENERIC_HB0;\
+ type AFMT_GENERIC_HB1;\
+ type AFMT_GENERIC_HB2;\
+ type AFMT_GENERIC_HB3;\
+ type AFMT_GENERIC_LOCK_STATUS;\
+ type AFMT_GENERIC_CONFLICT;\
+ type AFMT_GENERIC_CONFLICT_CLR;\
+ type AFMT_GENERIC0_FRAME_UPDATE_PENDING;\
+ type AFMT_GENERIC1_FRAME_UPDATE_PENDING;\
+ type AFMT_GENERIC2_FRAME_UPDATE_PENDING;\
+ type AFMT_GENERIC3_FRAME_UPDATE_PENDING;\
+ type AFMT_GENERIC4_FRAME_UPDATE_PENDING;\
+ type AFMT_GENERIC5_FRAME_UPDATE_PENDING;\
+ type AFMT_GENERIC6_FRAME_UPDATE_PENDING;\
+ type AFMT_GENERIC7_FRAME_UPDATE_PENDING;\
+ type AFMT_GENERIC0_FRAME_UPDATE;\
+ type AFMT_GENERIC1_FRAME_UPDATE;\
+ type AFMT_GENERIC2_FRAME_UPDATE;\
+ type AFMT_GENERIC3_FRAME_UPDATE;\
+ type AFMT_GENERIC4_FRAME_UPDATE;\
+ type AFMT_GENERIC5_FRAME_UPDATE;\
+ type AFMT_GENERIC6_FRAME_UPDATE;\
+ type AFMT_GENERIC7_FRAME_UPDATE;\
+ type HDMI_GENERIC0_CONT;\
+ type HDMI_GENERIC0_SEND;\
+ type HDMI_GENERIC0_LINE;\
+ type HDMI_GENERIC1_CONT;\
+ type HDMI_GENERIC1_SEND;\
+ type HDMI_GENERIC1_LINE;\
+ type HDMI_GENERIC2_CONT;\
+ type HDMI_GENERIC2_SEND;\
+ type HDMI_GENERIC2_LINE;\
+ type HDMI_GENERIC3_CONT;\
+ type HDMI_GENERIC3_SEND;\
+ type HDMI_GENERIC3_LINE;\
+ type HDMI_GENERIC4_CONT;\
+ type HDMI_GENERIC4_SEND;\
+ type HDMI_GENERIC4_LINE;\
+ type HDMI_GENERIC5_CONT;\
+ type HDMI_GENERIC5_SEND;\
+ type HDMI_GENERIC5_LINE;\
+ type HDMI_GENERIC6_CONT;\
+ type HDMI_GENERIC6_SEND;\
+ type HDMI_GENERIC6_LINE;\
+ type HDMI_GENERIC7_CONT;\
+ type HDMI_GENERIC7_SEND;\
+ type HDMI_GENERIC7_LINE;\
+ type DP_PIXEL_ENCODING;\
+ type DP_COMPONENT_DEPTH;\
+ type HDMI_PACKET_GEN_VERSION;\
+ type HDMI_KEEPOUT_MODE;\
+ type HDMI_DEEP_COLOR_ENABLE;\
+ type HDMI_CLOCK_CHANNEL_RATE;\
+ type HDMI_DEEP_COLOR_DEPTH;\
+ type HDMI_GC_CONT;\
+ type HDMI_GC_SEND;\
+ type HDMI_NULL_SEND;\
+ type HDMI_DATA_SCRAMBLE_EN;\
+ type HDMI_AUDIO_INFO_SEND;\
+ type AFMT_AUDIO_INFO_UPDATE;\
+ type HDMI_AUDIO_INFO_LINE;\
+ type HDMI_GC_AVMUTE;\
+ type DP_MSE_RATE_X;\
+ type DP_MSE_RATE_Y;\
+ type DP_MSE_RATE_UPDATE_PENDING;\
+ type DP_SEC_GSP0_ENABLE;\
+ type DP_SEC_STREAM_ENABLE;\
+ type DP_SEC_GSP1_ENABLE;\
+ type DP_SEC_GSP2_ENABLE;\
+ type DP_SEC_GSP3_ENABLE;\
+ type DP_SEC_GSP4_ENABLE;\
+ type DP_SEC_GSP5_ENABLE;\
+ type DP_SEC_GSP6_ENABLE;\
+ type DP_SEC_GSP7_ENABLE;\
+ type DP_SEC_MPG_ENABLE;\
+ type DP_VID_STREAM_DIS_DEFER;\
+ type DP_VID_STREAM_ENABLE;\
+ type DP_VID_STREAM_STATUS;\
+ type DP_STEER_FIFO_RESET;\
+ type DP_VID_M_N_GEN_EN;\
+ type DP_VID_N;\
+ type DP_VID_M;\
+ type DIG_START;\
+ type AFMT_AUDIO_SRC_SELECT;\
+ type AFMT_AUDIO_CHANNEL_ENABLE;\
+ type HDMI_AUDIO_PACKETS_PER_LINE;\
+ type HDMI_AUDIO_DELAY_EN;\
+ type AFMT_60958_CS_UPDATE;\
+ type AFMT_AUDIO_LAYOUT_OVRD;\
+ type AFMT_60958_OSF_OVRD;\
+ type HDMI_ACR_AUTO_SEND;\
+ type HDMI_ACR_SOURCE;\
+ type HDMI_ACR_AUDIO_PRIORITY;\
+ type HDMI_ACR_CTS_32;\
+ type HDMI_ACR_N_32;\
+ type HDMI_ACR_CTS_44;\
+ type HDMI_ACR_N_44;\
+ type HDMI_ACR_CTS_48;\
+ type HDMI_ACR_N_48;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_L;\
+ type AFMT_60958_CS_CLOCK_ACCURACY;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_R;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_2;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_3;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_4;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_5;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_6;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_7;\
+ type DP_SEC_AUD_N;\
+ type DP_SEC_TIMESTAMP_MODE;\
+ type DP_SEC_ASP_ENABLE;\
+ type DP_SEC_ATP_ENABLE;\
+ type DP_SEC_AIP_ENABLE;\
+ type DP_SEC_ACM_ENABLE;\
+ type AFMT_AUDIO_SAMPLE_SEND;\
+ type AFMT_AUDIO_CLOCK_EN;\
+ type TMDS_PIXEL_ENCODING;\
+ type TMDS_COLOR_FORMAT;\
+ type DIG_STEREOSYNC_SELECT;\
+ type DIG_STEREOSYNC_GATE_EN;\
+ type DP_DB_DISABLE;\
+ type DP_MSA_MISC0;\
+ type DP_MSA_HTOTAL;\
+ type DP_MSA_VTOTAL;\
+ type DP_MSA_HSTART;\
+ type DP_MSA_VSTART;\
+ type DP_MSA_HSYNCWIDTH;\
+ type DP_MSA_HSYNCPOLARITY;\
+ type DP_MSA_VSYNCWIDTH;\
+ type DP_MSA_VSYNCPOLARITY;\
+ type DP_MSA_HWIDTH;\
+ type DP_MSA_VHEIGHT;\
+ type HDMI_DB_DISABLE;\
+ type DP_VID_N_MUL;\
+ type DP_VID_M_DOUBLE_VALUE_EN
+
+struct dcn10_stream_encoder_shift {
+ SE_REG_FIELD_LIST_DCN1_0(uint8_t);
+};
+
+struct dcn10_stream_encoder_mask {
+ SE_REG_FIELD_LIST_DCN1_0(uint32_t);
+};
+
+struct dcn10_stream_encoder {
+ struct stream_encoder base;
+ const struct dcn10_stream_enc_registers *regs;
+ const struct dcn10_stream_encoder_shift *se_shift;
+ const struct dcn10_stream_encoder_mask *se_mask;
+};
+
+void dcn10_stream_encoder_construct(
+ struct dcn10_stream_encoder *enc1,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id,
+ const struct dcn10_stream_enc_registers *regs,
+ const struct dcn10_stream_encoder_shift *se_shift,
+ const struct dcn10_stream_encoder_mask *se_mask);
+
+void enc1_update_generic_info_packet(
+ struct dcn10_stream_encoder *enc1,
+ uint32_t packet_index,
+ const struct dc_info_packet *info_packet);
+
+void enc1_stream_encoder_dp_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ enum dc_color_space output_color_space);
+
+void enc1_stream_encoder_hdmi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ int actual_pix_clk_khz,
+ bool enable_audio);
+
+void enc1_stream_encoder_dvi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ bool is_dual_link);
+
+void enc1_stream_encoder_set_mst_bandwidth(
+ struct stream_encoder *enc,
+ struct fixed31_32 avg_time_slots_per_mtp);
+
+void enc1_stream_encoder_update_dp_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame);
+
+void enc1_stream_encoder_stop_dp_info_packets(
+ struct stream_encoder *enc);
+
+void enc1_stream_encoder_dp_blank(
+ struct stream_encoder *enc);
+
+void enc1_stream_encoder_dp_unblank(
+ struct stream_encoder *enc,
+ const struct encoder_unblank_param *param);
+
+void enc1_setup_stereo_sync(
+ struct stream_encoder *enc,
+ int tg_inst, bool enable);
+
+void enc1_stream_encoder_set_avmute(
+ struct stream_encoder *enc,
+ bool enable);
+
+void enc1_se_audio_mute_control(
+ struct stream_encoder *enc,
+ bool mute);
+
+void enc1_se_dp_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info);
+
+void enc1_se_dp_audio_enable(
+ struct stream_encoder *enc);
+
+void enc1_se_dp_audio_disable(
+ struct stream_encoder *enc);
+
+void enc1_se_hdmi_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info,
+ struct audio_crtc_info *audio_crtc_info);
+
+void enc1_se_hdmi_audio_disable(
+ struct stream_encoder *enc);
+
+#endif /* __DC_STREAM_ENCODER_DCN10_H__ */
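
[Editor's note: the header above instantiates one field-list macro twice — once with uint8_t for bit shifts and once with uint32_t for masks — so every register field automatically gets a matching shift/mask pair. A minimal standalone sketch of the pattern; the demo_* names are illustrative, not the driver's API.]

#include <stdint.h>

#define DEMO_SE_FIELD_LIST(type) \
        type DP_VID_STREAM_ENABLE; \
        type DIG_START

struct demo_se_shift {
        DEMO_SE_FIELD_LIST(uint8_t);
};

struct demo_se_mask {
        DEMO_SE_FIELD_LIST(uint32_t);
};

/* Decode one register field from a raw value using its shift/mask pair. */
static inline uint32_t demo_get_field(uint32_t reg_val,
                                      uint8_t shift, uint32_t mask)
{
        return (reg_val & mask) >> shift;
}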
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 22e7ee7..8eafe1a 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -341,6 +341,10 @@ bool dm_dmcu_set_pipe(struct dc_context *ctx, unsigned int controller_id);
unsigned long long dm_get_timestamp(struct dc_context *ctx);
+unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
+ unsigned long long current_time_stamp,
+ unsigned long long last_time_stamp);
+
/*
* performance tracing
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index c109b2c..fd9d97a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -26,75 +26,89 @@
#include "display_mode_lib.h"
#include "dc_features.h"
+static const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = 164,
+ .dpte_buffer_size_in_pte_reqs = 42,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .pte_enable = 1,
+ .pte_chunk_size_kbytes = 2,
+ .meta_chunk_size_kbytes = 2,
+ .writeback_chunk_size_kbytes = 2,
+ .line_buffer_size_bits = 589824,
+ .max_line_buffer_lines = 12,
+ .IsLineBufferBppFixed = 0,
+ .LineBufferFixedBpp = -1,
+ .writeback_luma_buffer_size_kbytes = 12,
+ .writeback_chroma_buffer_size_kbytes = 8,
+ .max_num_dpp = 4,
+ .max_num_wb = 2,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 4,
+ .max_vscl_ratio = 4,
+ .hscl_mults = 4,
+ .vscl_mults = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dispclk_ramp_margin_percent = 1,
+ .underscan_factor = 1.10,
+ .min_vblank_lines = 14,
+ .dppclk_delay_subtotal = 90,
+ .dispclk_delay_subtotal = 42,
+ .dcfclk_cstate_latency = 10,
+ .max_inter_dcn_tile_repeaters = 8,
+ .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
+ .bug_forcing_LC_req_same_size_fixed = 0,
+};
+
+static const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .urgent_latency_us = 4.0,
+ .writeback_latency_us = 12.0,
+ .ideal_dram_bw_after_urgent_percent = 80.0,
+ .max_request_size_bytes = 256,
+ .downspread_percent = 0.5,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 128,
+ .urgent_out_of_order_return_per_channel_bytes = 256,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .num_chans = 2,
+ .vmm_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 17.0,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+};
+
static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project)
{
- if (project == DML_PROJECT_RAVEN1) {
- soc->sr_exit_time_us = 9.0;
- soc->sr_enter_plus_exit_time_us = 11.0;
- soc->urgent_latency_us = 4.0;
- soc->writeback_latency_us = 12.0;
- soc->ideal_dram_bw_after_urgent_percent = 80.0;
- soc->max_request_size_bytes = 256;
- soc->downspread_percent = 0.5;
- soc->dram_page_open_time_ns = 50.0;
- soc->dram_rw_turnaround_time_ns = 17.5;
- soc->dram_return_buffer_per_channel_bytes = 8192;
- soc->round_trip_ping_latency_dcfclk_cycles = 128;
- soc->urgent_out_of_order_return_per_channel_bytes = 256;
- soc->channel_interleave_bytes = 256;
- soc->num_banks = 8;
- soc->num_chans = 2;
- soc->vmm_page_size_bytes = 4096;
- soc->dram_clock_change_latency_us = 17.0;
- soc->writeback_dram_clock_change_latency_us = 23.0;
- soc->return_bus_width_bytes = 64;
- } else {
- BREAK_TO_DEBUGGER(); /* Invalid Project Specified */
+ switch (project) {
+ case DML_PROJECT_RAVEN1:
+ *soc = dcn1_0_soc;
+ break;
+ default:
+ ASSERT(0);
+ break;
}
}
static void set_ip_params(struct _vcs_dpi_ip_params_st *ip, enum dml_project project)
{
- if (project == DML_PROJECT_RAVEN1) {
- ip->rob_buffer_size_kbytes = 64;
- ip->det_buffer_size_kbytes = 164;
- ip->dpte_buffer_size_in_pte_reqs = 42;
- ip->dpp_output_buffer_pixels = 2560;
- ip->opp_output_buffer_lines = 1;
- ip->pixel_chunk_size_kbytes = 8;
- ip->pte_enable = 1;
- ip->pte_chunk_size_kbytes = 2;
- ip->meta_chunk_size_kbytes = 2;
- ip->writeback_chunk_size_kbytes = 2;
- ip->line_buffer_size_bits = 589824;
- ip->max_line_buffer_lines = 12;
- ip->IsLineBufferBppFixed = 0;
- ip->LineBufferFixedBpp = -1;
- ip->writeback_luma_buffer_size_kbytes = 12;
- ip->writeback_chroma_buffer_size_kbytes = 8;
- ip->max_num_dpp = 4;
- ip->max_num_wb = 2;
- ip->max_dchub_pscl_bw_pix_per_clk = 4;
- ip->max_pscl_lb_bw_pix_per_clk = 2;
- ip->max_lb_vscl_bw_pix_per_clk = 4;
- ip->max_vscl_hscl_bw_pix_per_clk = 4;
- ip->max_hscl_ratio = 4;
- ip->max_vscl_ratio = 4;
- ip->hscl_mults = 4;
- ip->vscl_mults = 4;
- ip->max_hscl_taps = 8;
- ip->max_vscl_taps = 8;
- ip->dispclk_ramp_margin_percent = 1;
- ip->underscan_factor = 1.10;
- ip->min_vblank_lines = 14;
- ip->dppclk_delay_subtotal = 90;
- ip->dispclk_delay_subtotal = 42;
- ip->dcfclk_cstate_latency = 10;
- ip->max_inter_dcn_tile_repeaters = 8;
- ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0;
- ip->bug_forcing_LC_req_same_size_fixed = 0;
- } else {
- BREAK_TO_DEBUGGER(); /* Invalid Project Specified */
+ switch (project) {
+ case DML_PROJECT_RAVEN1:
+ *ip = dcn1_0_ip;
+ break;
+ default:
+ ASSERT(0);
+ break;
}
}
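
[Editor's note: the refactor above replaces long per-field assignment chains with static const parameter tables selected by a switch, so adding a project becomes one table plus one case label. A reduced sketch of the same pattern, with placeholder types and two values borrowed from the table above.]

struct demo_ip_params {
        int rob_buffer_size_kbytes;
        int det_buffer_size_kbytes;
};

enum demo_project {
        DEMO_PROJECT_RAVEN1,
};

static const struct demo_ip_params demo_raven1_ip = {
        .rob_buffer_size_kbytes = 64,
        .det_buffer_size_kbytes = 164,
};

static void demo_set_ip_params(struct demo_ip_params *ip,
                               enum demo_project project)
{
        switch (project) {
        case DEMO_PROJECT_RAVEN1:
                *ip = demo_raven1_ip;   /* one struct copy, no per-field stores */
                break;
        default:
                /* the real code ASSERTs on unknown projects */
                break;
        }
}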
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 09affa1..ce750ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -215,8 +215,8 @@ struct writeback_st {
int wb_vtaps_luma;
int wb_htaps_chroma;
int wb_vtaps_chroma;
- int wb_hratio;
- int wb_vratio;
+ double wb_hratio;
+ double wb_vratio;
};
struct _vcs_dpi_display_output_params_st {
@@ -224,6 +224,7 @@ struct _vcs_dpi_display_output_params_st {
int output_bpp;
int dsc_enable;
int wb_enable;
+ int num_active_wb;
int opp_input_bpc;
int output_type;
int output_format;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index 87b580f..61fe484 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -75,6 +75,9 @@ bool dal_hw_factory_init(
return true;
case DCE_VERSION_11_0:
case DCE_VERSION_11_2:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case DCE_VERSION_11_22:
+#endif
dal_hw_factory_dce110_init(factory);
return true;
case DCE_VERSION_12_0:
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 0ae8ace..910ae2b7 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -72,6 +72,9 @@ bool dal_hw_translate_init(
case DCE_VERSION_10_0:
case DCE_VERSION_11_0:
case DCE_VERSION_11_2:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case DCE_VERSION_11_22:
+#endif
dal_hw_translate_dce110_init(translate);
return true;
case DCE_VERSION_12_0:
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
index abd0095..b7256f5 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
@@ -527,7 +527,7 @@ static void construct(
REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
if (xtal_ref_div == 0) {
- DC_LOG_WARNING("Invalid base timer divider\n",
+ DC_LOG_WARNING("Invalid base timer divider [%s]\n",
__func__);
xtal_ref_div = 2;
}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
index 5cbf662..c3d7c32 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
@@ -83,6 +83,9 @@ struct i2caux *dal_i2caux_create(
case DCE_VERSION_8_3:
return dal_i2caux_dce80_create(ctx);
case DCE_VERSION_11_2:
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ case DCE_VERSION_11_22:
+#endif
return dal_i2caux_dce112_create(ctx);
case DCE_VERSION_11_0:
return dal_i2caux_dce110_create(ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 8c51ad7..a94942d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -95,11 +95,6 @@ struct resource_funcs {
struct link_encoder *(*link_enc_create)(
const struct encoder_init_data *init);
- enum dc_status (*validate_guaranteed)(
- struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_state *context);
-
bool (*validate_bandwidth)(
struct dc *dc,
struct dc_state *context);
@@ -250,6 +245,7 @@ struct dce_bw_output {
bool all_displays_in_sync;
struct dce_watermarks urgent_wm_ns[MAX_PIPES];
struct dce_watermarks stutter_exit_wm_ns[MAX_PIPES];
+ struct dce_watermarks stutter_entry_wm_ns[MAX_PIPES];
struct dce_watermarks nbp_state_change_wm_ns[MAX_PIPES];
int sclk_khz;
int sclk_deep_sleep_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 090b7a8..30b3a08 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -102,13 +102,14 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size);
-ssize_t dal_ddc_service_read_dpcd_data(
+enum ddc_result dal_ddc_service_read_dpcd_data(
struct ddc_service *ddc,
bool i2c,
enum i2c_mot_mode mot,
uint32_t address,
uint8_t *data,
- uint32_t len);
+ uint32_t len,
+ uint32_t *read);
enum ddc_result dal_ddc_service_write_dpcd_data(
struct ddc_service *ddc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
index a9bfe9f..933ea7a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
@@ -42,6 +42,10 @@ enum bw_calcs_version {
BW_CALCS_VERSION_CARRIZO,
BW_CALCS_VERSION_POLARIS10,
BW_CALCS_VERSION_POLARIS11,
+ BW_CALCS_VERSION_POLARIS12,
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ BW_CALCS_VERSION_VEGAM,
+#endif
BW_CALCS_VERSION_STONEY,
BW_CALCS_VERSION_VEGA10
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
new file mode 100644
index 0000000..02f757d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCHUBBUB_H__
+#define __DAL_DCHUBBUB_H__
+
+
+enum dcc_control {
+ dcc_control__256_256_xxx,
+ dcc_control__128_128_xxx,
+ dcc_control__256_64_64,
+};
+
+enum segment_order {
+ segment_order__na,
+ segment_order__contiguous,
+ segment_order__non_contiguous,
+};
+
+
+struct hubbub_funcs {
+ void (*update_dchub)(
+ struct hubbub *hubbub,
+ struct dchub_init_data *dh_data);
+
+ bool (*get_dcc_compression_cap)(struct hubbub *hubbub,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output);
+
+ bool (*dcc_support_swizzle)(
+ enum swizzle_mode_values swizzle,
+ unsigned int bytes_per_element,
+ enum segment_order *segment_order_horz,
+ enum segment_order *segment_order_vert);
+
+ bool (*dcc_support_pixel_format)(
+ enum surface_pixel_format format,
+ unsigned int *bytes_per_element);
+};
+
+
+#endif
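
[Editor's note: hubbub_funcs above is a typical DC ops table — a struct of function pointers filled in per hardware generation. A self-contained sketch of how such a table is commonly declared and dispatched; the demo_* names are hypothetical, not driver API.]

struct demo_ops {
        void (*update)(void *ctx, int value);   /* may be NULL if unsupported */
};

struct demo_dev {
        const struct demo_ops *ops;
        void *ctx;
};

static void demo_dev_update(struct demo_dev *dev, int value)
{
        /* dispatch through the table, tolerating absent optional hooks */
        if (dev->ops && dev->ops->update)
                dev->ops->update(dev->ctx, value);
}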
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 9999560..582458f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -44,7 +44,23 @@ struct dpp_grph_csc_adjustment {
enum graphics_gamut_adjust_type gamut_adjust_type;
};
+struct dcn_dpp_state {
+ uint32_t igam_lut_mode;
+ uint32_t igam_input_format;
+ uint32_t dgam_lut_mode;
+ uint32_t rgam_lut_mode;
+ uint32_t gamut_remap_mode;
+ uint32_t gamut_remap_c11_c12;
+ uint32_t gamut_remap_c13_c14;
+ uint32_t gamut_remap_c21_c22;
+ uint32_t gamut_remap_c23_c24;
+ uint32_t gamut_remap_c31_c32;
+ uint32_t gamut_remap_c33_c34;
+};
+
struct dpp_funcs {
+ void (*dpp_read_state)(struct dpp *dpp, struct dcn_dpp_state *s);
+
void (*dpp_reset)(struct dpp *dpp);
void (*dpp_set_scaler)(struct dpp *dpp,
@@ -117,7 +133,7 @@ struct dpp_funcs {
struct dpp *dpp_base,
enum surface_pixel_format format,
enum expansion_mode mode,
- struct csc_transform input_csc_color_matrix,
+ struct dc_csc_transform input_csc_color_matrix,
enum dc_color_space input_color_space);
void (*dpp_full_bypass)(struct dpp *dpp_base);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 9ced254..331f8ff 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -56,7 +56,6 @@ struct hubp {
bool power_gated;
};
-
struct hubp_funcs {
void (*hubp_setup)(
struct hubp *hubp,
@@ -121,6 +120,7 @@ struct hubp_funcs {
void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
+ void (*hubp_read_state)(struct hubp *hubp);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index b221581..cf7433e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -140,11 +140,6 @@ enum opp_regamma {
OPP_REGAMMA_USER
};
-struct csc_transform {
- uint16_t matrix[12];
- bool enable_adjustment;
-};
-
struct dc_bias_and_scale {
uint16_t scale_red;
uint16_t bias_red;
@@ -191,4 +186,9 @@ enum controller_dp_test_pattern {
CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA
};
+enum dc_lut_mode {
+ LUT_BYPASS,
+ LUT_RAM_A,
+ LUT_RAM_B
+};
#endif /* __DAL_HW_SHARED_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
index 2109eac..b2fa4c4 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
@@ -87,7 +87,7 @@ struct ipp_funcs {
struct input_pixel_processor *ipp,
enum surface_pixel_format format,
enum expansion_mode mode,
- struct csc_transform input_csc_color_matrix,
+ struct dc_csc_transform input_csc_color_matrix,
enum dc_color_space input_color_space);
/* DCE function to setup IPP. TODO: see if we can consolidate to setup */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index 54d8a13..cf6df2e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -149,6 +149,7 @@ struct link_encoder_funcs {
bool connect);
void (*enable_hpd)(struct link_encoder *enc);
void (*disable_hpd)(struct link_encoder *enc);
+ bool (*is_dig_enabled)(struct link_encoder *enc);
void (*destroy)(struct link_encoder **enc);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index 3e1e7e6..47f1dc5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -104,6 +104,7 @@ struct mem_input_funcs {
struct mem_input *mem_input,
struct dce_watermarks nbp,
struct dce_watermarks stutter,
+ struct dce_watermarks stutter_enter,
struct dce_watermarks urgent,
uint32_t total_dest_line_time_ns);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 23a8d5e..caf74e3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -105,7 +105,24 @@ struct mpc {
struct mpcc mpcc_array[MAX_MPCC];
};
+struct mpcc_state {
+ uint32_t opp_id;
+ uint32_t dpp_id;
+ uint32_t bot_mpcc_id;
+ uint32_t mode;
+ uint32_t alpha_mode;
+ uint32_t pre_multiplied_alpha;
+ uint32_t overlap_only;
+ uint32_t idle;
+ uint32_t busy;
+};
+
struct mpc_funcs {
+ void (*read_mpcc_state)(
+ struct mpc *mpc,
+ int mpcc_inst,
+ struct mpcc_state *s);
+
/*
* Insert DPP into MPC tree based on specified blending position.
* Only used for planes that are part of blending chain for OPP output
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index b5db169..cfa7ec9 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -29,31 +29,40 @@
#define STREAM_ENCODER_H_
#include "audio_types.h"
+#include "hw_shared.h"
struct dc_bios;
struct dc_context;
struct dc_crtc_timing;
-struct encoder_info_packet {
- bool valid;
- uint8_t hb0;
- uint8_t hb1;
- uint8_t hb2;
- uint8_t hb3;
- uint8_t sb[32];
+enum dp_pixel_encoding_type {
+ DP_PIXEL_ENCODING_TYPE_RGB444 = 0x00000000,
+ DP_PIXEL_ENCODING_TYPE_YCBCR422 = 0x00000001,
+ DP_PIXEL_ENCODING_TYPE_YCBCR444 = 0x00000002,
+ DP_PIXEL_ENCODING_TYPE_RGB_WIDE_GAMUT = 0x00000003,
+ DP_PIXEL_ENCODING_TYPE_Y_ONLY = 0x00000004,
+ DP_PIXEL_ENCODING_TYPE_YCBCR420 = 0x00000005
+};
+
+enum dp_component_depth {
+ DP_COMPONENT_PIXEL_DEPTH_6BPC = 0x00000000,
+ DP_COMPONENT_PIXEL_DEPTH_8BPC = 0x00000001,
+ DP_COMPONENT_PIXEL_DEPTH_10BPC = 0x00000002,
+ DP_COMPONENT_PIXEL_DEPTH_12BPC = 0x00000003,
+ DP_COMPONENT_PIXEL_DEPTH_16BPC = 0x00000004
};
struct encoder_info_frame {
/* auxiliary video information */
- struct encoder_info_packet avi;
- struct encoder_info_packet gamut;
- struct encoder_info_packet vendor;
+ struct dc_info_packet avi;
+ struct dc_info_packet gamut;
+ struct dc_info_packet vendor;
/* source product description */
- struct encoder_info_packet spd;
+ struct dc_info_packet spd;
/* video stream configuration */
- struct encoder_info_packet vsc;
+ struct dc_info_packet vsc;
/* HDR Static MetaData */
- struct encoder_info_packet hdrsmd;
+ struct dc_info_packet hdrsmd;
};
struct encoder_unblank_param {
@@ -147,6 +156,7 @@ struct stream_encoder_funcs {
void (*set_avmute)(
struct stream_encoder *enc, bool enable);
+
};
#endif /* STREAM_ENCODER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 3217b5b..69cb0a1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -140,6 +140,9 @@ struct timing_generator_funcs {
void (*program_timing)(struct timing_generator *tg,
const struct dc_crtc_timing *timing,
bool use_vbios);
+ void (*program_vline_interrupt)(struct timing_generator *optc,
+ const struct dc_crtc_timing *dc_crtc_timing,
+ unsigned long long vsync_delta);
bool (*enable_crtc)(struct timing_generator *tg);
bool (*disable_crtc)(struct timing_generator *tg);
bool (*is_counter_moving)(struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
index c5b3623..fecc80c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -252,7 +252,7 @@ struct transform_funcs {
struct transform *xfm_base,
enum surface_pixel_format format,
enum expansion_mode mode,
- struct csc_transform input_csc_color_matrix,
+ struct dc_csc_transform input_csc_color_matrix,
enum dc_color_space input_color_space);
void (*ipp_full_bypass)(struct transform *xfm_base);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index e764cba..29abf3e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -32,6 +32,8 @@
#include "inc/hw/link_encoder.h"
#include "core_status.h"
+#define EDP_BACKLIGHT_RAMP_DISABLE_LEVEL 0xFFFFFFFF
+
enum pipe_gating_control {
PIPE_GATING_CONTROL_DISABLE = 0,
PIPE_GATING_CONTROL_ENABLE,
@@ -63,6 +65,7 @@ struct dchub_init_data;
struct dc_static_screen_events;
struct resource_pool;
struct resource_context;
+struct stream_resource;
struct hw_sequencer_funcs {
@@ -93,6 +96,12 @@ struct hw_sequencer_funcs {
enum dc_color_space colorspace,
uint16_t *matrix);
+ void (*program_output_csc)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix,
+ int opp_id);
+
void (*update_plane_addr)(
const struct dc *dc,
struct pipe_ctx *pipe_ctx);
@@ -154,6 +163,11 @@ struct hw_sequencer_funcs {
struct dc *dc,
struct pipe_ctx *pipe,
bool lock);
+ void (*blank_pixel_data)(
+ struct dc *dc,
+ struct stream_resource *stream_res,
+ struct dc_stream_state *stream,
+ bool blank);
void (*set_bandwidth)(
struct dc *dc,
@@ -169,7 +183,7 @@ struct hw_sequencer_funcs {
void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
int num_pipes, const struct dc_static_screen_events *events);
- enum dc_status (*prog_pixclk_crtc_otg)(
+ enum dc_status (*enable_stream_timing)(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
struct dc *dc);
@@ -201,6 +215,7 @@ struct hw_sequencer_funcs {
void (*set_cursor_position)(struct pipe_ctx *pipe);
void (*set_cursor_attribute)(struct pipe_ctx *pipe);
+
};
void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index 77eb728..3306e7b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -183,6 +183,36 @@
FN(reg_name, f4), v4, \
FN(reg_name, f5), v5)
+#define REG_GET_6(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6) \
+ generic_reg_get6(CTX, REG(reg_name), \
+ FN(reg_name, f1), v1, \
+ FN(reg_name, f2), v2, \
+ FN(reg_name, f3), v3, \
+ FN(reg_name, f4), v4, \
+ FN(reg_name, f5), v5, \
+ FN(reg_name, f6), v6)
+
+#define REG_GET_7(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7) \
+ generic_reg_get7(CTX, REG(reg_name), \
+ FN(reg_name, f1), v1, \
+ FN(reg_name, f2), v2, \
+ FN(reg_name, f3), v3, \
+ FN(reg_name, f4), v4, \
+ FN(reg_name, f5), v5, \
+ FN(reg_name, f6), v6, \
+ FN(reg_name, f7), v7)
+
+#define REG_GET_8(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8) \
+ generic_reg_get8(CTX, REG(reg_name), \
+ FN(reg_name, f1), v1, \
+ FN(reg_name, f2), v2, \
+ FN(reg_name, f3), v3, \
+ FN(reg_name, f4), v4, \
+ FN(reg_name, f5), v5, \
+ FN(reg_name, f6), v6, \
+ FN(reg_name, f7), v7, \
+ FN(reg_name, f8), v8)
+
/* macro to poll and wait for a register field to read back given value */
#define REG_WAIT(reg_name, field, val, delay_between_poll_us, max_try) \
@@ -389,4 +419,30 @@ uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
uint8_t shift5, uint32_t mask5, uint32_t *field_value5);
+uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+ uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+ uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
+ uint8_t shift6, uint32_t mask6, uint32_t *field_value6);
+
+uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+ uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+ uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
+ uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
+ uint8_t shift7, uint32_t mask7, uint32_t *field_value7);
+
+uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+ uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+ uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
+ uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
+ uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
+ uint8_t shift8, uint32_t mask8, uint32_t *field_value8);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */
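
[Editor's note: the REG_GET_6/7/8 macros fan out to generic_reg_getN helpers that perform a single register read and decode several shift/mask fields from it. A hedged standalone sketch of that idea, assuming a demo_read_reg() MMIO accessor that is not part of the real interface.]

#include <stdint.h>

uint32_t demo_read_reg(uint32_t addr);  /* assumed MMIO read accessor */

static uint32_t demo_reg_get2(uint32_t addr,
                              uint8_t shift1, uint32_t mask1, uint32_t *v1,
                              uint8_t shift2, uint32_t mask2, uint32_t *v2)
{
        uint32_t reg_val = demo_read_reg(addr); /* one read for all fields */

        *v1 = (reg_val & mask1) >> shift1;
        *v2 = (reg_val & mask2) >> shift2;
        return reg_val;         /* raw value, as generic_reg_get* returns */
}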
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 5467332f..640a647 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -139,10 +139,6 @@ bool resource_validate_attach_surfaces(
struct dc_state *context,
const struct resource_pool *pool);
-void validate_guaranteed_copy_streams(
- struct dc_state *context,
- int max_streams);
-
void resource_validate_ctx_update_pointer_after_copy(
const struct dc_state *src_ctx,
struct dc_state *dst_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
index a506c2e..cc3b1bc 100644
--- a/drivers/gpu/drm/amd/display/dc/irq_types.h
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -135,6 +135,13 @@ enum dc_irq_source {
DC_IRQ_SOURCE_VBLANK5,
DC_IRQ_SOURCE_VBLANK6,
+ DC_IRQ_SOURCE_DC1_VLINE0,
+ DC_IRQ_SOURCE_DC2_VLINE0,
+ DC_IRQ_SOURCE_DC3_VLINE0,
+ DC_IRQ_SOURCE_DC4_VLINE0,
+ DC_IRQ_SOURCE_DC5_VLINE0,
+ DC_IRQ_SOURCE_DC6_VLINE0,
+
DAL_IRQ_SOURCES_NUMBER
};
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 9831cb5..1b987b6 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -98,7 +98,14 @@
(eChipRev < VI_POLARIS11_M_A0))
#define ASIC_REV_IS_POLARIS11_M(eChipRev) ((eChipRev >= VI_POLARIS11_M_A0) && \
(eChipRev < VI_POLARIS12_V_A0))
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+#define VI_VEGAM_A0 110
+#define ASIC_REV_IS_POLARIS12_V(eChipRev) ((eChipRev >= VI_POLARIS12_V_A0) && \
+ (eChipRev < VI_VEGAM_A0))
+#define ASIC_REV_IS_VEGAM(eChipRev) (eChipRev >= VI_VEGAM_A0)
+#else
#define ASIC_REV_IS_POLARIS12_V(eChipRev) (eChipRev >= VI_POLARIS12_V_A0)
+#endif
/* DCE11 */
#define CZ_CARRIZO_A0 0x01
@@ -113,9 +120,14 @@
#define AI_GREENLAND_P_A0 1
#define AI_GREENLAND_P_A1 2
+#define AI_UNKNOWN 0xFF
-#define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_UNKNOWN)
-#define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_UNKNOWN)
+#define AI_VEGA12_P_A0 20
+#define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_VEGA12_P_A0)
+#define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_VEGA12_P_A0)
+
+#define ASICREV_IS_VEGA12_P(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN))
+#define ASICREV_IS_VEGA12_p(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN))
/* DCN1_0 */
#define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */
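
[Editor's note: the revision macros above classify eChipRev values into half-open ranges, with each newly added ASIC inserting an upper bound into its predecessor's check. A compact illustration with made-up bounds.]

/* Hypothetical revision bounds; real values come from the ASIC tables. */
#define DEMO_CHIP_B_A0  10
#define DEMO_CHIP_C_A0  20

/* [B_A0, C_A0) identifies chip B; everything from C_A0 up is chip C. */
#define DEMO_REV_IS_CHIP_B(rev) ((rev) >= DEMO_CHIP_B_A0 && (rev) < DEMO_CHIP_C_A0)
#define DEMO_REV_IS_CHIP_C(rev) ((rev) >= DEMO_CHIP_C_A0)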
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index fa54396..5b1f8ce 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -40,6 +40,9 @@ enum dce_version {
DCE_VERSION_10_0,
DCE_VERSION_11_0,
DCE_VERSION_11_2,
+#if defined(CONFIG_DRM_AMD_DC_VEGAM)
+ DCE_VERSION_11_22,
+#endif
DCE_VERSION_12_0,
DCE_VERSION_MAX,
DCN_VERSION_1_0,
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index 0de2586..16cbdb4 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -26,8 +26,6 @@
#ifndef __DAL_FIXED31_32_H__
#define __DAL_FIXED31_32_H__
-#include "os_types.h"
-
#define FIXED31_32_BITS_PER_FRACTIONAL_PART 32
/*
@@ -44,7 +42,7 @@
*/
struct fixed31_32 {
- int64_t value;
+ long long value;
};
/*
@@ -73,15 +71,15 @@ static const struct fixed31_32 dal_fixed31_32_ln2_div_2 = { 1488522236LL };
* result = numerator / denominator
*/
struct fixed31_32 dal_fixed31_32_from_fraction(
- int64_t numerator,
- int64_t denominator);
+ long long numerator,
+ long long denominator);
/*
* @brief
* result = arg
*/
-struct fixed31_32 dal_fixed31_32_from_int_nonconst(int64_t arg);
-static inline struct fixed31_32 dal_fixed31_32_from_int(int64_t arg)
+struct fixed31_32 dal_fixed31_32_from_int_nonconst(long long arg);
+static inline struct fixed31_32 dal_fixed31_32_from_int(long long arg)
{
if (__builtin_constant_p(arg)) {
struct fixed31_32 res;
@@ -213,7 +211,7 @@ static inline struct fixed31_32 dal_fixed31_32_clamp(
*/
struct fixed31_32 dal_fixed31_32_shl(
struct fixed31_32 arg,
- uint8_t shift);
+ unsigned char shift);
/*
* @brief
@@ -221,7 +219,7 @@ struct fixed31_32 dal_fixed31_32_shl(
*/
static inline struct fixed31_32 dal_fixed31_32_shr(
struct fixed31_32 arg,
- uint8_t shift)
+ unsigned char shift)
{
struct fixed31_32 res;
res.value = arg.value >> shift;
@@ -246,7 +244,7 @@ struct fixed31_32 dal_fixed31_32_add(
* result = arg1 + arg2
*/
static inline struct fixed31_32 dal_fixed31_32_add_int(struct fixed31_32 arg1,
- int32_t arg2)
+ int arg2)
{
return dal_fixed31_32_add(arg1,
dal_fixed31_32_from_int(arg2));
@@ -265,7 +263,7 @@ struct fixed31_32 dal_fixed31_32_sub(
* result = arg1 - arg2
*/
static inline struct fixed31_32 dal_fixed31_32_sub_int(struct fixed31_32 arg1,
- int32_t arg2)
+ int arg2)
{
return dal_fixed31_32_sub(arg1,
dal_fixed31_32_from_int(arg2));
@@ -291,7 +289,7 @@ struct fixed31_32 dal_fixed31_32_mul(
* result = arg1 * arg2
*/
static inline struct fixed31_32 dal_fixed31_32_mul_int(struct fixed31_32 arg1,
- int32_t arg2)
+ int arg2)
{
return dal_fixed31_32_mul(arg1,
dal_fixed31_32_from_int(arg2));
@@ -309,7 +307,7 @@ struct fixed31_32 dal_fixed31_32_sqr(
* result = arg1 / arg2
*/
static inline struct fixed31_32 dal_fixed31_32_div_int(struct fixed31_32 arg1,
- int64_t arg2)
+ long long arg2)
{
return dal_fixed31_32_from_fraction(arg1.value,
dal_fixed31_32_from_int(arg2).value);
@@ -434,21 +432,21 @@ struct fixed31_32 dal_fixed31_32_pow(
* @brief
* result = floor(arg) := greatest integer lower than or equal to arg
*/
-int32_t dal_fixed31_32_floor(
+int dal_fixed31_32_floor(
struct fixed31_32 arg);
/*
* @brief
* result = round(arg) := integer nearest to arg
*/
-int32_t dal_fixed31_32_round(
+int dal_fixed31_32_round(
struct fixed31_32 arg);
/*
* @brief
* result = ceil(arg) := lowest integer greater than or equal to arg
*/
-int32_t dal_fixed31_32_ceil(
+int dal_fixed31_32_ceil(
struct fixed31_32 arg);
/* the following two function are used in scaler hw programming to convert fixed
@@ -457,20 +455,20 @@ int32_t dal_fixed31_32_ceil(
* fractional
*/
-uint32_t dal_fixed31_32_u2d19(
+unsigned int dal_fixed31_32_u2d19(
struct fixed31_32 arg);
-uint32_t dal_fixed31_32_u0d19(
+unsigned int dal_fixed31_32_u0d19(
struct fixed31_32 arg);
-uint32_t dal_fixed31_32_clamp_u0d14(
+unsigned int dal_fixed31_32_clamp_u0d14(
struct fixed31_32 arg);
-uint32_t dal_fixed31_32_clamp_u0d10(
+unsigned int dal_fixed31_32_clamp_u0d10(
struct fixed31_32 arg);
-int32_t dal_fixed31_32_s4d19(
+int dal_fixed31_32_s4d19(
struct fixed31_32 arg);
#endif
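
[Editor's note: fixed31_32 stores a Q31.32 fixed-point number in a 64-bit integer — the real value times 2^32. A minimal sketch of construction and multiplication under that convention; unlike the driver's full-precision helper, the multiply here trades the low 16 fractional bits of each operand for staying in 64-bit math.]

struct demo_fixed31_32 {
        long long value;        /* Q31.32: value == real * 2^32 */
};

static struct demo_fixed31_32 demo_from_int(long long arg)
{
        /* assumes arg fits in the 31-bit integer part */
        struct demo_fixed31_32 res = { arg << 32 };
        return res;
}

static struct demo_fixed31_32 demo_mul(struct demo_fixed31_32 a,
                                       struct demo_fixed31_32 b)
{
        struct demo_fixed31_32 res;

        /*
         * (A * 2^16) * (B * 2^16) == A * B * 2^32, so the result is
         * back in Q31.32; the pre-shifts drop 16 fractional bits of
         * each operand, which the real helper avoids with wider math.
         */
        res.value = (a.value >> 16) * (b.value >> 16);
        return res;
}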
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 427796b..b608a08 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -29,39 +29,39 @@
#include "os_types.h"
#define MAX_NAME_LEN 32
-#define DC_LOG_ERROR(a, ...) dm_logger_write(DC_LOGGER, LOG_ERROR, a, ## __VA_ARGS__)
-#define DC_LOG_WARNING(a, ...) dm_logger_write(DC_LOGGER, LOG_WARNING, a, ## __VA_ARGS__)
-#define DC_LOG_DEBUG(a, ...) dm_logger_write(DC_LOGGER, LOG_DEBUG, a, ## __VA_ARGS__)
-#define DC_LOG_DC(a, ...) dm_logger_write(DC_LOGGER, LOG_DC, a, ## __VA_ARGS__)
-#define DC_LOG_DTN(a, ...) dm_logger_write(DC_LOGGER, LOG_DTN, a, ## __VA_ARGS__)
-#define DC_LOG_SURFACE(a, ...) dm_logger_write(DC_LOGGER, LOG_SURFACE, a, ## __VA_ARGS__)
-#define DC_LOG_HW_HOTPLUG(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_HOTPLUG, a, ## __VA_ARGS__)
-#define DC_LOG_HW_LINK_TRAINING(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_LINK_TRAINING, a, ## __VA_ARGS__)
-#define DC_LOG_HW_SET_MODE(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_SET_MODE, a, ## __VA_ARGS__)
-#define DC_LOG_HW_RESUME_S3(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_RESUME_S3, a, ## __VA_ARGS__)
-#define DC_LOG_HW_AUDIO(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_AUDIO, a, ## __VA_ARGS__)
-#define DC_LOG_HW_HPD_IRQ(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_HPD_IRQ, a, ## __VA_ARGS__)
-#define DC_LOG_MST(a, ...) dm_logger_write(DC_LOGGER, LOG_MST, a, ## __VA_ARGS__)
-#define DC_LOG_SCALER(a, ...) dm_logger_write(DC_LOGGER, LOG_SCALER, a, ## __VA_ARGS__)
-#define DC_LOG_BIOS(a, ...) dm_logger_write(DC_LOGGER, LOG_BIOS, a, ## __VA_ARGS__)
-#define DC_LOG_BANDWIDTH_CALCS(a, ...) dm_logger_write(DC_LOGGER, LOG_BANDWIDTH_CALCS, a, ## __VA_ARGS__)
-#define DC_LOG_BANDWIDTH_VALIDATION(a, ...) dm_logger_write(DC_LOGGER, LOG_BANDWIDTH_VALIDATION, a, ## __VA_ARGS__)
-#define DC_LOG_I2C_AUX(a, ...) dm_logger_write(DC_LOGGER, LOG_I2C_AUX, a, ## __VA_ARGS__)
-#define DC_LOG_SYNC(a, ...) dm_logger_write(DC_LOGGER, LOG_SYNC, a, ## __VA_ARGS__)
-#define DC_LOG_BACKLIGHT(a, ...) dm_logger_write(DC_LOGGER, LOG_BACKLIGHT, a, ## __VA_ARGS__)
-#define DC_LOG_FEATURE_OVERRIDE(a, ...) dm_logger_write(DC_LOGGER, LOG_FEATURE_OVERRIDE, a, ## __VA_ARGS__)
-#define DC_LOG_DETECTION_EDID_PARSER(a, ...) dm_logger_write(DC_LOGGER, LOG_DETECTION_EDID_PARSER, a, ## __VA_ARGS__)
-#define DC_LOG_DETECTION_DP_CAPS(a, ...) dm_logger_write(DC_LOGGER, LOG_DETECTION_DP_CAPS, a, ## __VA_ARGS__)
-#define DC_LOG_RESOURCE(a, ...) dm_logger_write(DC_LOGGER, LOG_RESOURCE, a, ## __VA_ARGS__)
-#define DC_LOG_DML(a, ...) dm_logger_write(DC_LOGGER, LOG_DML, a, ## __VA_ARGS__)
-#define DC_LOG_EVENT_MODE_SET(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_MODE_SET, a, ## __VA_ARGS__)
-#define DC_LOG_EVENT_DETECTION(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_DETECTION, a, ## __VA_ARGS__)
-#define DC_LOG_EVENT_LINK_TRAINING(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_LINK_TRAINING, a, ## __VA_ARGS__)
-#define DC_LOG_EVENT_LINK_LOSS(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_LINK_LOSS, a, ## __VA_ARGS__)
-#define DC_LOG_EVENT_UNDERFLOW(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_UNDERFLOW, a, ## __VA_ARGS__)
-#define DC_LOG_IF_TRACE(a, ...) dm_logger_write(DC_LOGGER, LOG_IF_TRACE, a, ## __VA_ARGS__)
-#define DC_LOG_PERF_TRACE(a, ...) dm_logger_write(DC_LOGGER, LOG_PERF_TRACE, a, ## __VA_ARGS__)
+#define DC_LOG_ERROR(...) DRM_ERROR(__VA_ARGS__)
+#define DC_LOG_WARNING(...) DRM_WARN(__VA_ARGS__)
+#define DC_LOG_DEBUG(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_DC(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_DTN(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_SURFACE(...) pr_debug("[SURFACE]:"__VA_ARGS__)
+#define DC_LOG_HW_HOTPLUG(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_HW_LINK_TRAINING(...) pr_debug("[HW_LINK_TRAINING]:"__VA_ARGS__)
+#define DC_LOG_HW_SET_MODE(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_HW_RESUME_S3(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_HW_AUDIO(...) pr_debug("[HW_AUDIO]:"__VA_ARGS__)
+#define DC_LOG_HW_HPD_IRQ(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_MST(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_SCALER(...) pr_debug("[SCALER]:"__VA_ARGS__)
+#define DC_LOG_BIOS(...) pr_debug("[BIOS]:"__VA_ARGS__)
+#define DC_LOG_BANDWIDTH_CALCS(...) pr_debug("[BANDWIDTH_CALCS]:"__VA_ARGS__)
+#define DC_LOG_BANDWIDTH_VALIDATION(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_I2C_AUX(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_SYNC(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_BACKLIGHT(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_FEATURE_OVERRIDE(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_DETECTION_EDID_PARSER(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_DETECTION_DP_CAPS(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_RESOURCE(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_DML(...) pr_debug("[DML]:"__VA_ARGS__)
+#define DC_LOG_EVENT_MODE_SET(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_EVENT_DETECTION(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_EVENT_LINK_TRAINING(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_EVENT_LINK_LOSS(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_EVENT_UNDERFLOW(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define DC_LOG_IF_TRACE(...) pr_debug("[IF_TRACE]:"__VA_ARGS__)
+#define DC_LOG_PERF_TRACE(...) DRM_DEBUG_KMS(__VA_ARGS__)
struct dal_logger;
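
[Editor's note: the logger rework above keeps the DC_LOG_* call sites stable while rerouting them onto the DRM/pr_debug backends, with some macros prepending a subsystem tag via string-literal concatenation. A small sketch of the same technique with a stand-in backend; demo_backend_log() is a placeholder for DRM_DEBUG_KMS/pr_debug.]

#include <stdio.h>

/* stand-in backend; ## swallows the comma when no varargs are given */
#define demo_backend_log(fmt, ...) \
        fprintf(stderr, fmt, ##__VA_ARGS__)

/* subsystem-facing macros stay stable; only the backend changes */
#define DEMO_LOG_ERROR(...)     demo_backend_log("[ERROR]:" __VA_ARGS__)
#define DEMO_LOG_SURFACE(...)   demo_backend_log("[SURFACE]:" __VA_ARGS__)

As in the patch, the tag prefix relies on the first argument being a string literal, so DEMO_LOG_SURFACE("pitch=%d\n", 256) expands to a single concatenated format string "[SURFACE]:pitch=%d\n".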
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index e7e374f..15e5b72 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -185,14 +185,14 @@ struct dividers {
static void build_coefficients(struct gamma_coefficients *coefficients, bool is_2_4)
{
- static const int32_t numerator01[] = { 31308, 180000};
- static const int32_t numerator02[] = { 12920, 4500};
- static const int32_t numerator03[] = { 55, 99};
- static const int32_t numerator04[] = { 55, 99};
- static const int32_t numerator05[] = { 2400, 2200};
+ static const int32_t numerator01[] = { 31308, 180000};
+ static const int32_t numerator02[] = { 12920, 4500};
+ static const int32_t numerator03[] = { 55, 99};
+ static const int32_t numerator04[] = { 55, 99};
+ static const int32_t numerator05[] = { 2400, 2200};
- uint32_t i = 0;
- uint32_t index = is_2_4 == true ? 0:1;
+ uint32_t i = 0;
+ uint32_t index = is_2_4 == true ? 0:1;
do {
coefficients->a0[i] = dal_fixed31_32_from_fraction(
@@ -691,7 +691,7 @@ static void build_degamma(struct pwl_float_data_ex *curve,
}
}
-static bool scale_gamma(struct pwl_float_data *pwl_rgb,
+static void scale_gamma(struct pwl_float_data *pwl_rgb,
const struct dc_gamma *ramp,
struct dividers dividers)
{
@@ -752,11 +752,9 @@ static bool scale_gamma(struct pwl_float_data *pwl_rgb,
dividers.divider3);
rgb->b = dal_fixed31_32_mul(rgb_last->b,
dividers.divider3);
-
- return true;
}
-static bool scale_gamma_dx(struct pwl_float_data *pwl_rgb,
+static void scale_gamma_dx(struct pwl_float_data *pwl_rgb,
const struct dc_gamma *ramp,
struct dividers dividers)
{
@@ -818,8 +816,71 @@ static bool scale_gamma_dx(struct pwl_float_data *pwl_rgb,
pwl_rgb[i-1].g, 2), pwl_rgb[i-2].g);
pwl_rgb[i].b = dal_fixed31_32_sub(dal_fixed31_32_mul_int(
pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b);
+}
- return true;
+/* todo: all these scale_gamma functions are inherently the same, but
+ * take different structures as params or a different format for the
+ * ramp values. We could probably implement them in a more generic fashion.
+ */
+static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb,
+ const struct regamma_ramp *ramp,
+ struct dividers dividers)
+{
+ unsigned short max_driver = 0xFFFF;
+ unsigned short max_os = 0xFF00;
+ unsigned short scaler = max_os;
+ uint32_t i;
+ struct pwl_float_data *rgb = pwl_rgb;
+ struct pwl_float_data *rgb_last = rgb + GAMMA_RGB_256_ENTRIES - 1;
+
+ i = 0;
+ do {
+ if (ramp->gamma[i] > max_os ||
+ ramp->gamma[i + 256] > max_os ||
+ ramp->gamma[i + 512] > max_os) {
+ scaler = max_driver;
+ break;
+ }
+ i++;
+ } while (i != GAMMA_RGB_256_ENTRIES);
+
+ i = 0;
+ do {
+ rgb->r = dal_fixed31_32_from_fraction(
+ ramp->gamma[i], scaler);
+ rgb->g = dal_fixed31_32_from_fraction(
+ ramp->gamma[i + 256], scaler);
+ rgb->b = dal_fixed31_32_from_fraction(
+ ramp->gamma[i + 512], scaler);
+
+ ++rgb;
+ ++i;
+ } while (i != GAMMA_RGB_256_ENTRIES);
+
+ rgb->r = dal_fixed31_32_mul(rgb_last->r,
+ dividers.divider1);
+ rgb->g = dal_fixed31_32_mul(rgb_last->g,
+ dividers.divider1);
+ rgb->b = dal_fixed31_32_mul(rgb_last->b,
+ dividers.divider1);
+
+ ++rgb;
+
+ rgb->r = dal_fixed31_32_mul(rgb_last->r,
+ dividers.divider2);
+ rgb->g = dal_fixed31_32_mul(rgb_last->g,
+ dividers.divider2);
+ rgb->b = dal_fixed31_32_mul(rgb_last->b,
+ dividers.divider2);
+
+ ++rgb;
+
+ rgb->r = dal_fixed31_32_mul(rgb_last->r,
+ dividers.divider3);
+ rgb->g = dal_fixed31_32_mul(rgb_last->g,
+ dividers.divider3);
+ rgb->b = dal_fixed31_32_mul(rgb_last->b,
+ dividers.divider3);
}
/*
@@ -949,7 +1010,7 @@ static inline void copy_rgb_regamma_to_coordinates_x(
uint32_t i = 0;
const struct pwl_float_data_ex *rgb_regamma = rgb_ex;
- while (i <= hw_points_num) {
+ while (i <= hw_points_num + 1) {
coords->regamma_y_red = rgb_regamma->r;
coords->regamma_y_green = rgb_regamma->g;
coords->regamma_y_blue = rgb_regamma->b;
@@ -1002,6 +1063,102 @@ static bool calculate_interpolated_hardware_curve(
return true;
}
+/* The "old" interpolation uses a complicated scheme to build an array of
+ * coefficients while also using an array of 0-255 normalized to 0-1
+ * Then there's another loop using both of the above + new scaled user ramp
+ * and we concatenate them. It also searches for points of interpolation and
+ * uses enums for positions.
+ *
+ * This function uses a different approach:
+ * user ramp is always applied on X with 0/255, 1/255, 2/255, ..., 255/255
+ * To find index for hwX , we notice the following:
+ * i/255 <= hwX < (i+1)/255 <=> i <= 255*hwX < i+1
+ * See apply_lut_1d which is the same principle, but on 4K entry 1D LUT
+ *
+ * Once the index is known, combined Y is simply:
+ * user_ramp(index) + (hwX-index/255)*(user_ramp(index+1) - user_ramp(index)
+ *
+ * We should switch to this method in all cases, it's simpler and faster
+ * ToDo one day - for now this only applies to ADL regamma to avoid regression
+ * for regular use cases (sRGB and PQ)
+ */
+static void interpolate_user_regamma(uint32_t hw_points_num,
+ struct pwl_float_data *rgb_user,
+ bool apply_degamma,
+ struct dc_transfer_func_distributed_points *tf_pts)
+{
+ uint32_t i;
+ uint32_t color = 0;
+ int32_t index;
+ int32_t index_next;
+ struct fixed31_32 *tf_point;
+ struct fixed31_32 hw_x;
+ struct fixed31_32 norm_factor =
+ dal_fixed31_32_from_int_nonconst(255);
+ struct fixed31_32 norm_x;
+ struct fixed31_32 index_f;
+ struct fixed31_32 lut1;
+ struct fixed31_32 lut2;
+ struct fixed31_32 delta_lut;
+ struct fixed31_32 delta_index;
+
+ i = 0;
+ /* fixed_pt library has problems handling too small values */
+ while (i != 32) {
+ tf_pts->red[i] = dal_fixed31_32_zero;
+ tf_pts->green[i] = dal_fixed31_32_zero;
+ tf_pts->blue[i] = dal_fixed31_32_zero;
+ ++i;
+ }
+ while (i <= hw_points_num + 1) {
+ for (color = 0; color < 3; color++) {
+ if (color == 0)
+ tf_point = &tf_pts->red[i];
+ else if (color == 1)
+ tf_point = &tf_pts->green[i];
+ else
+ tf_point = &tf_pts->blue[i];
+
+ if (apply_degamma) {
+ if (color == 0)
+ hw_x = coordinates_x[i].regamma_y_red;
+ else if (color == 1)
+ hw_x = coordinates_x[i].regamma_y_green;
+ else
+ hw_x = coordinates_x[i].regamma_y_blue;
+ } else
+ hw_x = coordinates_x[i].x;
+
+ norm_x = dal_fixed31_32_mul(norm_factor, hw_x);
+ index = dal_fixed31_32_floor(norm_x);
+ if (index < 0 || index > 255)
+ continue;
+
+ index_f = dal_fixed31_32_from_int_nonconst(index);
+ index_next = (index == 255) ? index : index + 1;
+
+ if (color == 0) {
+ lut1 = rgb_user[index].r;
+ lut2 = rgb_user[index_next].r;
+ } else if (color == 1) {
+ lut1 = rgb_user[index].g;
+ lut2 = rgb_user[index_next].g;
+ } else {
+ lut1 = rgb_user[index].b;
+ lut2 = rgb_user[index_next].b;
+ }
+
+ // we have everything now, so interpolate
+ delta_lut = dal_fixed31_32_sub(lut2, lut1);
+ delta_index = dal_fixed31_32_sub(norm_x, index_f);
+
+ *tf_point = dal_fixed31_32_add(lut1,
+ dal_fixed31_32_mul(delta_index, delta_lut));
+ }
+ ++i;
+ }
+}
+
static void build_new_custom_resulted_curve(
uint32_t hw_points_num,
struct dc_transfer_func_distributed_points *tf_pts)
@@ -1025,6 +1182,29 @@ static void build_new_custom_resulted_curve(
}
}
+static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma,
+ uint32_t hw_points_num)
+{
+ uint32_t i;
+
+ struct gamma_coefficients coeff;
+ struct pwl_float_data_ex *rgb = rgb_regamma;
+ const struct hw_x_point *coord_x = coordinates_x;
+
+ build_coefficients(&coeff, true);
+
+ i = 0;
+ while (i != hw_points_num + 1) {
+ rgb->r = translate_from_linear_space_ex(
+ coord_x->x, &coeff, 0);
+ rgb->g = rgb->r;
+ rgb->b = rgb->r;
+ ++coord_x;
+ ++rgb;
+ ++i;
+ }
+}
+
static bool map_regamma_hw_to_x_user(
const struct dc_gamma *ramp,
struct pixel_gamma_point *coeff128,
@@ -1062,6 +1242,7 @@ static bool map_regamma_hw_to_x_user(
}
}
+ /* this should be named differently; all it does is clamp to 0-1 */
build_new_custom_resulted_curve(hw_points_num, tf_pts);
return true;
@@ -1093,19 +1274,19 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
- rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
- GFP_KERNEL);
+ rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
+ GFP_KERNEL);
if (!rgb_user)
goto rgb_user_alloc_fail;
- rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
- GFP_KERNEL);
+ rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
+ GFP_KERNEL);
if (!rgb_regamma)
goto rgb_regamma_alloc_fail;
- axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + 3),
- GFP_KERNEL);
+ axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + 3),
+ GFP_KERNEL);
if (!axix_x)
goto axix_x_alloc_fail;
- coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
+ coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
if (!coeff)
goto coeff_alloc_fail;
@@ -1157,10 +1338,117 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
ret = true;
- kfree(coeff);
+ kvfree(coeff);
coeff_alloc_fail:
- kfree(axix_x);
+ kvfree(axix_x);
axix_x_alloc_fail:
+ kvfree(rgb_regamma);
+rgb_regamma_alloc_fail:
+ kvfree(rgb_user);
+rgb_user_alloc_fail:
+ return ret;
+}
+
+bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
+ const struct regamma_lut *regamma)
+{
+ struct gamma_coefficients coeff;
+ const struct hw_x_point *coord_x = coordinates_x;
+ uint32_t i = 0;
+
+ do {
+ coeff.a0[i] = dal_fixed31_32_from_fraction(
+ regamma->coeff.A0[i], 10000000);
+ coeff.a1[i] = dal_fixed31_32_from_fraction(
+ regamma->coeff.A1[i], 1000);
+ coeff.a2[i] = dal_fixed31_32_from_fraction(
+ regamma->coeff.A2[i], 1000);
+ coeff.a3[i] = dal_fixed31_32_from_fraction(
+ regamma->coeff.A3[i], 1000);
+ coeff.user_gamma[i] = dal_fixed31_32_from_fraction(
+ regamma->coeff.gamma[i], 1000);
+
+ ++i;
+ } while (i != 3);
+
+ i = 0;
+ /* fixed_pt library has problems handling too small values */
+ while (i != 32) {
+ output_tf->tf_pts.red[i] = dal_fixed31_32_zero;
+ output_tf->tf_pts.green[i] = dal_fixed31_32_zero;
+ output_tf->tf_pts.blue[i] = dal_fixed31_32_zero;
+ ++coord_x;
+ ++i;
+ }
+ while (i != MAX_HW_POINTS + 1) {
+ output_tf->tf_pts.red[i] = translate_from_linear_space_ex(
+ coord_x->x, &coeff, 0);
+ output_tf->tf_pts.green[i] = translate_from_linear_space_ex(
+ coord_x->x, &coeff, 1);
+ output_tf->tf_pts.blue[i] = translate_from_linear_space_ex(
+ coord_x->x, &coeff, 2);
+ ++coord_x;
+ ++i;
+ }
+
+ // this function just clamps output to 0-1
+ build_new_custom_resulted_curve(MAX_HW_POINTS, &output_tf->tf_pts);
+ output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+
+ return true;
+}
+
+bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
+ const struct regamma_lut *regamma)
+{
+ struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
+ struct dividers dividers;
+
+ struct pwl_float_data *rgb_user = NULL;
+ struct pwl_float_data_ex *rgb_regamma = NULL;
+ bool ret = false;
+
+ if (regamma == NULL)
+ return false;
+
+ output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+
+ rgb_user = kzalloc(sizeof(*rgb_user) * (GAMMA_RGB_256_ENTRIES + _EXTRA_POINTS),
+ GFP_KERNEL);
+ if (!rgb_user)
+ goto rgb_user_alloc_fail;
+
+ rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
+ GFP_KERNEL);
+ if (!rgb_regamma)
+ goto rgb_regamma_alloc_fail;
+
+ dividers.divider1 = dal_fixed31_32_from_fraction(3, 2);
+ dividers.divider2 = dal_fixed31_32_from_int(2);
+ dividers.divider3 = dal_fixed31_32_from_fraction(5, 2);
+
+ scale_user_regamma_ramp(rgb_user, &regamma->ramp, dividers);
+
+ if (regamma->flags.bits.applyDegamma == 1) {
+ apply_degamma_for_user_regamma(rgb_regamma, MAX_HW_POINTS);
+ copy_rgb_regamma_to_coordinates_x(coordinates_x,
+ MAX_HW_POINTS, rgb_regamma);
+ }
+
+ interpolate_user_regamma(MAX_HW_POINTS, rgb_user,
+ regamma->flags.bits.applyDegamma, tf_pts);
+
+ // no custom HDR curves!
+ tf_pts->end_exponent = 0;
+ tf_pts->x_point_at_y1_red = 1;
+ tf_pts->x_point_at_y1_green = 1;
+ tf_pts->x_point_at_y1_blue = 1;
+
+ // this function just clamps output to 0-1
+ build_new_custom_resulted_curve(MAX_HW_POINTS, tf_pts);
+
+ ret = true;
+
kfree(rgb_regamma);
rgb_regamma_alloc_fail:
kfree(rgb_user);
@@ -1192,19 +1480,19 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
- rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
- GFP_KERNEL);
+ rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
+ GFP_KERNEL);
if (!rgb_user)
goto rgb_user_alloc_fail;
- curve = kzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS),
- GFP_KERNEL);
+ curve = kvzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS),
+ GFP_KERNEL);
if (!curve)
goto curve_alloc_fail;
- axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS),
- GFP_KERNEL);
+ axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS),
+ GFP_KERNEL);
if (!axix_x)
goto axix_x_alloc_fail;
- coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
+ coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
if (!coeff)
goto coeff_alloc_fail;
@@ -1246,13 +1534,13 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
ret = true;
- kfree(coeff);
+ kvfree(coeff);
coeff_alloc_fail:
- kfree(axix_x);
+ kvfree(axix_x);
axix_x_alloc_fail:
- kfree(curve);
+ kvfree(curve);
curve_alloc_fail:
- kfree(rgb_user);
+ kvfree(rgb_user);
rgb_user_alloc_fail:
return ret;
@@ -1281,8 +1569,9 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
}
ret = true;
} else if (trans == TRANSFER_FUNCTION_PQ) {
- rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS +
- _EXTRA_POINTS), GFP_KERNEL);
+ rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
+ (MAX_HW_POINTS + _EXTRA_POINTS),
+ GFP_KERNEL);
if (!rgb_regamma)
goto rgb_regamma_alloc_fail;
points->end_exponent = 7;
@@ -1302,11 +1591,12 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
}
ret = true;
- kfree(rgb_regamma);
+ kvfree(rgb_regamma);
} else if (trans == TRANSFER_FUNCTION_SRGB ||
trans == TRANSFER_FUNCTION_BT709) {
- rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS +
- _EXTRA_POINTS), GFP_KERNEL);
+ rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
+ (MAX_HW_POINTS + _EXTRA_POINTS),
+ GFP_KERNEL);
if (!rgb_regamma)
goto rgb_regamma_alloc_fail;
points->end_exponent = 0;
@@ -1324,7 +1614,7 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
}
ret = true;
- kfree(rgb_regamma);
+ kvfree(rgb_regamma);
}
rgb_regamma_alloc_fail:
return ret;
@@ -1348,8 +1638,9 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
}
ret = true;
} else if (trans == TRANSFER_FUNCTION_PQ) {
- rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS +
- _EXTRA_POINTS), GFP_KERNEL);
+ rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
+ (MAX_HW_POINTS + _EXTRA_POINTS),
+ GFP_KERNEL);
if (!rgb_degamma)
goto rgb_degamma_alloc_fail;
@@ -1364,11 +1655,12 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
}
ret = true;
- kfree(rgb_degamma);
+ kvfree(rgb_degamma);
} else if (trans == TRANSFER_FUNCTION_SRGB ||
trans == TRANSFER_FUNCTION_BT709) {
- rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS +
- _EXTRA_POINTS), GFP_KERNEL);
+ rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
+ (MAX_HW_POINTS + _EXTRA_POINTS),
+ GFP_KERNEL);
if (!rgb_degamma)
goto rgb_degamma_alloc_fail;
@@ -1382,7 +1674,7 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
}
ret = true;
- kfree(rgb_degamma);
+ kvfree(rgb_degamma);
}
points->end_exponent = 0;
points->x_point_at_y1_red = 1;
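
[Editor's note: the interpolate_user_regamma comment earlier in this file describes the index lookup i <= 255*hwX < i+1 followed by a linear blend. A plain-double sketch of that one step; the driver works in fixed31_32 and skips out-of-range indices rather than clamping as done here.]

static double demo_interp_user_ramp(const double ramp[256], double hw_x)
{
        int index = (int)(hw_x * 255.0);        /* floor(255 * hwX) for hw_x >= 0 */
        int index_next;
        double frac;

        if (index < 0)
                index = 0;
        if (index > 255)
                index = 255;
        index_next = (index == 255) ? index : index + 1;

        frac = hw_x * 255.0 - (double)index;    /* position within the segment */
        return ramp[index] + frac * (ramp[index_next] - ramp[index]);
}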
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index b7f9bc2..b6404899 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -32,6 +32,47 @@ struct dc_transfer_func_distributed_points;
struct dc_rgb_fixed;
enum dc_transfer_func_predefined;
+/* For SetRegamma ADL interface support
+ * Must match escape type
+ */
+union regamma_flags {
+ unsigned int raw;
+ struct {
+ unsigned int gammaRampArray :1; // RegammaRamp is in use
+ unsigned int gammaFromEdid :1; // gamma from EDID is in use
+ unsigned int gammaFromEdidEx :1; // gamma from EDID is in use, but only for DisplayID 1.2
+ unsigned int gammaFromUser :1; // user custom gamma is used
+ unsigned int coeffFromUser :1; // coeff. A0-A3 from user are in use
+ unsigned int coeffFromEdid :1; // coeff. A0-A3 from EDID are in use
+ unsigned int applyDegamma :1; // flag for additional degamma correction in driver
+ unsigned int gammaPredefinedSRGB :1; // flag for sRGB gamma
+ unsigned int gammaPredefinedPQ :1; // flag for PQ gamma
+ unsigned int gammaPredefinedPQ2084Interim :1; // flag for PQ gamma, lower max nits
+ unsigned int gammaPredefined36 :1; // flag for 3.6 gamma
+ unsigned int gammaPredefinedReset :1; // flag to return to previous gamma
+ } bits;
+};
+
+struct regamma_ramp {
+ unsigned short gamma[256*3]; // gamma ramp packed the same way as the OS (Windows): r, g & b
+};
+
+struct regamma_coeff {
+ int gamma[3];
+ int A0[3];
+ int A1[3];
+ int A2[3];
+ int A3[3];
+};
+
+struct regamma_lut {
+ union regamma_flags flags;
+ union {
+ struct regamma_ramp ramp;
+ struct regamma_coeff coeff;
+ };
+};
+
void setup_x_points_distribution(void);
void precompute_pq(void);
void precompute_de_pq(void);
@@ -45,9 +86,14 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
struct dc_transfer_func_distributed_points *points);
-bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
+bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
struct dc_transfer_func_distributed_points *points);
+bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
+ const struct regamma_lut *regamma);
+
+bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
+ const struct regamma_lut *regamma);
#endif /* COLOR_MOD_COLOR_GAMMA_H_ */
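
The new regamma_lut plumbing mirrors the ADL SetRegamma escape: callers set exactly one source bit in flags and fill either the packed 256-entry-per-channel ramp or the A0-A3 coefficient set, then hand the result to one of the two new calculate_user_regamma_* entry points. A hedged sketch of the coefficient path (the fixed-point scaling of the values is an assumption, not taken from this header):

    /* sketch: hand user-supplied regamma coefficients to the color module;
     * output_tf is assumed to come from the stream's output transfer func */
    static bool apply_user_regamma(struct dc_transfer_func *output_tf)
    {
            struct regamma_lut regamma = { 0 };
            int ch;

            regamma.flags.bits.coeffFromUser = 1;
            for (ch = 0; ch < 3; ch++) {
                    regamma.coeff.gamma[ch] = 2400;   /* illustrative fixed-point */
                    regamma.coeff.A0[ch] = 31308;     /* values only */
            }

            return calculate_user_regamma_coeff(output_tf, &regamma);
    }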
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
index 041f87b..48e0219 100644
--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
+++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c
@@ -115,18 +115,22 @@ struct mod_stats *mod_stats_create(struct dc *dc)
&reg_data, sizeof(unsigned int), &flag))
core_stats->enabled = reg_data;
- core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
- if (dm_read_persistent_data(dc->ctx, NULL, NULL,
- DAL_STATS_ENTRIES_REGKEY,
- &reg_data, sizeof(unsigned int), &flag)) {
- if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX)
- core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
- else
- core_stats->entries = reg_data;
- }
+ if (core_stats->enabled) {
+ core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
+ if (dm_read_persistent_data(dc->ctx, NULL, NULL,
+ DAL_STATS_ENTRIES_REGKEY,
+ &reg_data, sizeof(unsigned int), &flag)) {
+ if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX)
+ core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
+ else
+ core_stats->entries = reg_data;
+ }
- core_stats->time = kzalloc(sizeof(struct stats_time_cache) * core_stats->entries,
- GFP_KERNEL);
+ core_stats->time = kzalloc(sizeof(struct stats_time_cache) * core_stats->entries,
+ GFP_KERNEL);
+ } else {
+ core_stats->entries = 0;
+ }
if (core_stats->time == NULL)
goto fail_construct;
@@ -187,7 +191,7 @@ void mod_stats_dump(struct mod_stats *mod_stats)
for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) {
dm_logger_write(logger, LOG_PROFILING,
- "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u\n",
+ "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
time[i].render_time_in_us,
time[i].avg_render_time_in_us_last_ten,
time[i].min_window,
@@ -227,7 +231,7 @@ void mod_stats_reset_data(struct mod_stats *mod_stats)
memset(core_stats->time, 0,
sizeof(struct stats_time_cache) * core_stats->entries);
- core_stats->index = 0;
+ core_stats->index = 1;
}
void mod_stats_update_flip(struct mod_stats *mod_stats,
@@ -250,7 +254,7 @@ void mod_stats_update_flip(struct mod_stats *mod_stats,
time[index].flip_timestamp_in_ns = timestamp_in_ns;
time[index].render_time_in_us =
- timestamp_in_ns - time[index - 1].flip_timestamp_in_ns;
+ (timestamp_in_ns - time[index - 1].flip_timestamp_in_ns) / 1000;
if (index >= 10) {
for (unsigned int i = 0; i < 10; i++)
@@ -261,10 +265,12 @@ void mod_stats_update_flip(struct mod_stats *mod_stats,
if (time[index].num_vsync_between_flips > 0)
time[index].vsync_to_flip_time_in_us =
- timestamp_in_ns - time[index].vupdate_timestamp_in_ns;
+ (timestamp_in_ns -
+ time[index].vupdate_timestamp_in_ns) / 1000;
else
time[index].vsync_to_flip_time_in_us =
- timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns;
+ (timestamp_in_ns -
+ time[index - 1].vupdate_timestamp_in_ns) / 1000;
core_stats->index++;
}
@@ -275,6 +281,8 @@ void mod_stats_update_vupdate(struct mod_stats *mod_stats,
struct core_stats *core_stats = NULL;
struct stats_time_cache *time = NULL;
unsigned int index = 0;
+ unsigned int num_vsyncs = 0;
+ unsigned int prev_vsync_in_ns = 0;
if (mod_stats == NULL)
return;
@@ -286,14 +294,27 @@ void mod_stats_update_vupdate(struct mod_stats *mod_stats,
time = core_stats->time;
index = core_stats->index;
+ num_vsyncs = time[index].num_vsync_between_flips;
+
+ if (num_vsyncs < MOD_STATS_NUM_VSYNCS) {
+ if (num_vsyncs == 0) {
+ prev_vsync_in_ns =
+ time[index - 1].vupdate_timestamp_in_ns;
+
+ time[index].flip_to_vsync_time_in_us =
+ (timestamp_in_ns -
+ time[index - 1].flip_timestamp_in_ns) /
+ 1000;
+ } else {
+ prev_vsync_in_ns =
+ time[index].vupdate_timestamp_in_ns;
+ }
- time[index].vupdate_timestamp_in_ns = timestamp_in_ns;
- if (time[index].num_vsync_between_flips < MOD_STATS_NUM_VSYNCS)
- time[index].v_sync_time_in_us[time[index].num_vsync_between_flips] =
- timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns;
- time[index].flip_to_vsync_time_in_us =
- timestamp_in_ns - time[index - 1].flip_timestamp_in_ns;
+ time[index].v_sync_time_in_us[num_vsyncs] =
+ (timestamp_in_ns - prev_vsync_in_ns) / 1000;
+ }
+ time[index].vupdate_timestamp_in_ns = timestamp_in_ns;
time[index].num_vsync_between_flips++;
}
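
The recurring "/ 1000" above is the substantive fix in these hunks: the *_in_us fields were previously being filled with raw nanosecond deltas. A portable form of that conversion (plain 64-bit division is fine on x86-64 but not on 32-bit kernels) would go through div_u64(), sketched here:

    #include <linux/math64.h>   /* div_u64() */

    /* sketch: nanosecond timestamps in, microsecond delta out */
    static unsigned int ns_delta_to_us(u64 end_ns, u64 start_ns)
    {
            return (unsigned int)div_u64(end_ns - start_ns, 1000);
    }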
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 9fa3aae..33de330 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -109,6 +109,26 @@ enum amd_powergating_state {
#define AMD_PG_SUPPORT_GFX_PIPELINE (1 << 12)
#define AMD_PG_SUPPORT_MMHUB (1 << 13)
+enum PP_FEATURE_MASK {
+ PP_SCLK_DPM_MASK = 0x1,
+ PP_MCLK_DPM_MASK = 0x2,
+ PP_PCIE_DPM_MASK = 0x4,
+ PP_SCLK_DEEP_SLEEP_MASK = 0x8,
+ PP_POWER_CONTAINMENT_MASK = 0x10,
+ PP_UVD_HANDSHAKE_MASK = 0x20,
+ PP_SMC_VOLTAGE_CONTROL_MASK = 0x40,
+ PP_VBI_TIME_SUPPORT_MASK = 0x80,
+ PP_ULV_MASK = 0x100,
+ PP_ENABLE_GFX_CG_THRU_SMU = 0x200,
+ PP_CLOCK_STRETCH_MASK = 0x400,
+ PP_OD_FUZZY_FAN_CONTROL_MASK = 0x800,
+ PP_SOCCLK_DPM_MASK = 0x1000,
+ PP_DCEFCLK_DPM_MASK = 0x2000,
+ PP_OVERDRIVE_MASK = 0x4000,
+ PP_GFXOFF_MASK = 0x8000,
+ PP_ACG_MASK = 0x10000,
+};
+
struct amd_ip_funcs {
/* Name of IP block */
char *name;
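
The new PP_FEATURE_MASK enum above gives the powerplay core a shared vocabulary for a feature bitmask that was previously module-private: per-ASIC init code opts out of features by clearing bits, and runtime paths test them. A small sketch of both sides (pp_hwmgr's feature_mask field is assumed here, as in the hwmgr changes later in this series):

    /* sketch: trimming and testing powerplay feature bits */
    static void example_trim_features(struct pp_hwmgr *hwmgr)
    {
            /* ASICs without gfxoff support clear the bit up front */
            hwmgr->feature_mask &= ~PP_GFXOFF_MASK;

            if (hwmgr->feature_mask & PP_OVERDRIVE_MASK)
                    pr_debug("overdrive enabled by feature mask\n");
    }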
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
index 4ccf968..721c611 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
@@ -3895,6 +3895,10 @@
#define mmCM0_CM_MEM_PWR_CTRL_BASE_IDX 2
#define mmCM0_CM_MEM_PWR_STATUS 0x0d33
#define mmCM0_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_INDEX 0x0d35
+#define mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_DATA 0x0d36
+#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
@@ -4367,7 +4371,10 @@
#define mmCM1_CM_MEM_PWR_CTRL_BASE_IDX 2
#define mmCM1_CM_MEM_PWR_STATUS 0x0e4e
#define mmCM1_CM_MEM_PWR_STATUS_BASE_IDX 2
-
+#define mmCM1_CM_TEST_DEBUG_INDEX 0x0e50
+#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM1_CM_TEST_DEBUG_DATA 0x0e51
+#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
// base address: 0x399c
@@ -4839,7 +4846,10 @@
#define mmCM2_CM_MEM_PWR_CTRL_BASE_IDX 2
#define mmCM2_CM_MEM_PWR_STATUS 0x0f69
#define mmCM2_CM_MEM_PWR_STATUS_BASE_IDX 2
-
+#define mmCM2_CM_TEST_DEBUG_INDEX 0x0f6b
+#define mmCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM2_CM_TEST_DEBUG_DATA 0x0f6c
+#define mmCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
// base address: 0x3e08
@@ -5311,7 +5321,10 @@
#define mmCM3_CM_MEM_PWR_CTRL_BASE_IDX 2
#define mmCM3_CM_MEM_PWR_STATUS 0x1084
#define mmCM3_CM_MEM_PWR_STATUS_BASE_IDX 2
-
+#define mmCM3_CM_TEST_DEBUG_INDEX 0x1086
+#define mmCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM3_CM_TEST_DEBUG_DATA 0x1087
+#define mmCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
// base address: 0x4274
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
index e2a2f11..e7c0cad 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h
@@ -14049,6 +14049,14 @@
#define CM0_CM_MEM_PWR_STATUS__RGAM_MEM_PWR_STATE__SHIFT 0x2
#define CM0_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L
#define CM0_CM_MEM_PWR_STATUS__RGAM_MEM_PWR_STATE_MASK 0x0000000CL
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM0_CM_TEST_DEBUG_DATA
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
// addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
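
The CM*_CM_TEST_DEBUG_INDEX/DATA pairs added above form a classic indirect debug-bus interface: software writes a selector into the INDEX register, then reads the multiplexed value back from DATA. A hedged sketch of the access pattern (the dm_read_reg()/dm_write_reg() usage and the omitted SOC base-address translation are assumptions of this sketch, not something this header defines):

    /* sketch: read one line of the CM0 debug bus */
    static uint32_t cm0_read_debug_bus(struct dc_context *ctx, uint32_t sel)
    {
            dm_write_reg(ctx, mmCM0_CM_TEST_DEBUG_INDEX,
                         sel << CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT);
            return dm_read_reg(ctx, mmCM0_CM_TEST_DEBUG_DATA);
    }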
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_default.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_default.h
new file mode 100644
index 0000000..9e19e72
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_default.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _df_1_7_DEFAULT_HEADER
+#define _df_1_7_DEFAULT_HEADER
+
+#define mmFabricConfigAccessControl_DEFAULT 0x00000000
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h
new file mode 100644
index 0000000..2b305dd
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _df_1_7_OFFSET_HEADER
+#define _df_1_7_OFFSET_HEADER
+
+#define mmFabricConfigAccessControl 0x0410
+#define mmFabricConfigAccessControl_BASE_IDX 0
+
+#define mmDF_PIE_AON0_DfGlobalClkGater 0x00fc
+#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX 0
+
+#define mmDF_CS_AON0_DramBaseAddress0 0x0044
+#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h
new file mode 100644
index 0000000..2ba8497
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _df_1_7_SH_MASK_HEADER
+#define _df_1_7_SH_MASK_HEADER
+
+/* FabricConfigAccessControl */
+#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT 0x0
+#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT 0x1
+#define FabricConfigAccessControl__CfgRegInstID__SHIFT 0x10
+#define FabricConfigAccessControl__CfgRegInstAccEn_MASK 0x00000001L
+#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK 0x00000002L
+#define FabricConfigAccessControl__CfgRegInstID_MASK 0x00FF0000L
+
+/* DF_PIE_AON0_DfGlobalClkGater */
+#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT 0x0
+#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK 0x0000000FL
+
+/* DF_CS_AON0_DramBaseAddress0 */
+#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
+#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
+#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
+#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
+#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
+#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
+#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
+#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
+#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
+#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
+
+#endif
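
Each register in this new DF 1.7 header follows the usual __SHIFT/_MASK convention, so a raw readout decodes mechanically. For instance, a sketch of pulling apart a DramBaseAddress0 value:

    /* sketch: decode a raw DF_CS_AON0_DramBaseAddress0 value */
    static void decode_dram_base0(uint32_t reg)
    {
            uint32_t valid = (reg & DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK) >>
                             DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT;
            uint32_t chans = (reg & DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
                             DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
            uint32_t base  = (reg & DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK) >>
                             DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT;

            pr_debug("range valid %u, interleave chans %u, base field 0x%x\n",
                     valid, chans, base);
    }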
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index f696bbb..7931502 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -632,6 +632,13 @@ typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2
ULONG ulReserved;
}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2;
+typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3
+{
+ COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 ulClock;
+ USHORT usMclk_fcw_frac; //fractional part of fcw = usMclk_fcw_frac/65536
+ USHORT usMclk_fcw_int; //integer part of fcw
+}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3;
+
//Input parameter of DynamicMemorySettingsTable
//when ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag = COMPUTE_MEMORY_PLL_PARAM
typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 0f5ad54..de177ce 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -501,6 +501,32 @@ enum atom_cooling_solution_id{
LIQUID_COOLING = 0x01
};
+struct atom_firmware_info_v3_2 {
+ struct atom_common_table_header table_header;
+ uint32_t firmware_revision;
+ uint32_t bootup_sclk_in10khz;
+ uint32_t bootup_mclk_in10khz;
+ uint32_t firmware_capability; // enum atombios_firmware_capability
+ uint32_t main_call_parser_entry; /* direct address of main parser call in VBIOS binary. */
+ uint32_t bios_scratch_reg_startaddr; // 1st bios scratch register dword address
+ uint16_t bootup_vddc_mv;
+ uint16_t bootup_vddci_mv;
+ uint16_t bootup_mvddc_mv;
+ uint16_t bootup_vddgfx_mv;
+ uint8_t mem_module_id;
+ uint8_t coolingsolution_id; /*0: Air cooling; 1: Liquid cooling ... */
+ uint8_t reserved1[2];
+ uint32_t mc_baseaddr_high;
+ uint32_t mc_baseaddr_low;
+ uint8_t board_i2c_feature_id; // enum of atom_board_i2c_feature_id_def
+ uint8_t board_i2c_feature_gpio_id; // i2c id found in gpio_lut data table gpio_id
+ uint8_t board_i2c_feature_slave_addr;
+ uint8_t reserved3;
+ uint16_t bootup_mvddq_mv;
+ uint16_t bootup_mvpp_mv;
+ uint32_t zfbstartaddrin16mb;
+ uint32_t reserved2[3];
+};
/*
***************************************************************************
@@ -1169,7 +1195,29 @@ struct atom_gfx_info_v2_2
uint32_t rlc_gpu_timer_refclk;
};
-
+struct atom_gfx_info_v2_3 {
+ struct atom_common_table_header table_header;
+ uint8_t gfxip_min_ver;
+ uint8_t gfxip_max_ver;
+ uint8_t max_shader_engines;
+ uint8_t max_tile_pipes;
+ uint8_t max_cu_per_sh;
+ uint8_t max_sh_per_se;
+ uint8_t max_backends_per_se;
+ uint8_t max_texture_channel_caches;
+ uint32_t regaddr_cp_dma_src_addr;
+ uint32_t regaddr_cp_dma_src_addr_hi;
+ uint32_t regaddr_cp_dma_dst_addr;
+ uint32_t regaddr_cp_dma_dst_addr_hi;
+ uint32_t regaddr_cp_dma_command;
+ uint32_t regaddr_cp_status;
+ uint32_t regaddr_rlc_gpu_clock_32;
+ uint32_t rlc_gpu_timer_refclk;
+ uint8_t active_cu_per_sh;
+ uint8_t active_rb_per_se;
+ uint16_t gcgoldenoffset;
+ uint32_t rm21_sram_vmin_value;
+};
/*
***************************************************************************
@@ -1198,6 +1246,76 @@ struct atom_smu_info_v3_1
uint8_t fw_ctf_polarity; // GPIO polarity for CTF
};
+struct atom_smu_info_v3_2 {
+ struct atom_common_table_header table_header;
+ uint8_t smuip_min_ver;
+ uint8_t smuip_max_ver;
+ uint8_t smu_rsd1;
+ uint8_t gpuclk_ss_mode;
+ uint16_t sclk_ss_percentage;
+ uint16_t sclk_ss_rate_10hz;
+ uint16_t gpuclk_ss_percentage; // in unit of 0.001%
+ uint16_t gpuclk_ss_rate_10hz;
+ uint32_t core_refclk_10khz;
+ uint8_t ac_dc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for AC/DC switching, =0xff means invalid
+ uint8_t ac_dc_polarity; // GPIO polarity for AC/DC switching
+ uint8_t vr0hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR0 HOT event, =0xff means invalid
+ uint8_t vr0hot_polarity; // GPIO polarity for VR0 HOT event
+ uint8_t vr1hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event, =0xff means invalid
+ uint8_t vr1hot_polarity; // GPIO polarity for VR1 HOT event
+ uint8_t fw_ctf_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid
+ uint8_t fw_ctf_polarity; // GPIO polarity for CTF
+ uint8_t pcc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid
+ uint8_t pcc_gpio_polarity; // GPIO polarity for PCC
+ uint16_t smugoldenoffset;
+ uint32_t gpupll_vco_freq_10khz;
+ uint32_t bootup_smnclk_10khz;
+ uint32_t bootup_socclk_10khz;
+ uint32_t bootup_mp0clk_10khz;
+ uint32_t bootup_mp1clk_10khz;
+ uint32_t bootup_lclk_10khz;
+ uint32_t bootup_dcefclk_10khz;
+ uint32_t ctf_threshold_override_value;
+ uint32_t reserved[5];
+};
+
+struct atom_smu_info_v3_3 {
+ struct atom_common_table_header table_header;
+ uint8_t smuip_min_ver;
+ uint8_t smuip_max_ver;
+ uint8_t smu_rsd1;
+ uint8_t gpuclk_ss_mode;
+ uint16_t sclk_ss_percentage;
+ uint16_t sclk_ss_rate_10hz;
+ uint16_t gpuclk_ss_percentage; // in unit of 0.001%
+ uint16_t gpuclk_ss_rate_10hz;
+ uint32_t core_refclk_10khz;
+ uint8_t ac_dc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for AC/DC switching, =0xff means invalid
+ uint8_t ac_dc_polarity; // GPIO polarity for AC/DC switching
+ uint8_t vr0hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR0 HOT event, =0xff means invalid
+ uint8_t vr0hot_polarity; // GPIO polarity for VR0 HOT event
+ uint8_t vr1hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event, =0xff means invalid
+ uint8_t vr1hot_polarity; // GPIO polarity for VR1 HOT event
+ uint8_t fw_ctf_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid
+ uint8_t fw_ctf_polarity; // GPIO polarity for CTF
+ uint8_t pcc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid
+ uint8_t pcc_gpio_polarity; // GPIO polarity for PCC
+ uint16_t smugoldenoffset;
+ uint32_t gpupll_vco_freq_10khz;
+ uint32_t bootup_smnclk_10khz;
+ uint32_t bootup_socclk_10khz;
+ uint32_t bootup_mp0clk_10khz;
+ uint32_t bootup_mp1clk_10khz;
+ uint32_t bootup_lclk_10khz;
+ uint32_t bootup_dcefclk_10khz;
+ uint32_t ctf_threshold_override_value;
+ uint32_t syspll3_0_vco_freq_10khz;
+ uint32_t syspll3_1_vco_freq_10khz;
+ uint32_t bootup_fclk_10khz;
+ uint32_t bootup_waflclk_10khz;
+ uint32_t reserved[3];
+};
+
/*
***************************************************************************
Data Table smc_dpm_info structure
@@ -1283,7 +1401,6 @@ struct atom_smc_dpm_info_v4_1
uint32_t boardreserved[10];
};
-
/*
***************************************************************************
Data Table asic_profiling_info structure
@@ -1864,6 +1981,55 @@ enum atom_smu9_syspll0_clock_id
SMU9_SYSPLL0_DISPCLK_ID = 11, // DISPCLK
};
+enum atom_smu11_syspll_id {
+ SMU11_SYSPLL0_ID = 0,
+ SMU11_SYSPLL1_0_ID = 1,
+ SMU11_SYSPLL1_1_ID = 2,
+ SMU11_SYSPLL1_2_ID = 3,
+ SMU11_SYSPLL2_ID = 4,
+ SMU11_SYSPLL3_0_ID = 5,
+ SMU11_SYSPLL3_1_ID = 6,
+};
+
+
+enum atom_smu11_syspll0_clock_id {
+ SMU11_SYSPLL0_SOCCLK_ID = 0, // SOCCLK
+ SMU11_SYSPLL0_MP0CLK_ID = 1, // MP0CLK
+ SMU11_SYSPLL0_DCLK_ID = 2, // DCLK
+ SMU11_SYSPLL0_VCLK_ID = 3, // VCLK
+ SMU11_SYSPLL0_ECLK_ID = 4, // ECLK
+ SMU11_SYSPLL0_DCEFCLK_ID = 5, // DCEFCLK
+};
+
+
+enum atom_smu11_syspll1_0_clock_id {
+ SMU11_SYSPLL1_0_UCLKA_ID = 0, // UCLK_a
+};
+
+enum atom_smu11_syspll1_1_clock_id {
+ SMU11_SYSPLL1_0_UCLKB_ID = 0, // UCLK_b
+};
+
+enum atom_smu11_syspll1_2_clock_id {
+ SMU11_SYSPLL1_0_FCLK_ID = 0, // FCLK
+};
+
+enum atom_smu11_syspll2_clock_id {
+ SMU11_SYSPLL2_GFXCLK_ID = 0, // GFXCLK
+};
+
+enum atom_smu11_syspll3_0_clock_id {
+ SMU11_SYSPLL3_0_WAFCLK_ID = 0, // WAFCLK
+ SMU11_SYSPLL3_0_DISPCLK_ID = 1, // DISPCLK
+ SMU11_SYSPLL3_0_DPREFCLK_ID = 2, // DPREFCLK
+};
+
+enum atom_smu11_syspll3_1_clock_id {
+ SMU11_SYSPLL3_1_MP1CLK_ID = 0, // MP1CLK
+ SMU11_SYSPLL3_1_SMNCLK_ID = 1, // SMNCLK
+ SMU11_SYSPLL3_1_LCLK_ID = 2, // LCLK
+};
+
struct atom_get_smu_clock_info_output_parameters_v3_1
{
union {
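
The SMU11 syspll and clock-id enums above parallel the existing SMU9 ones and feed the same get_smu_clock_info query. A hypothetical helper, just to show how the ids are meant to be consumed on the driver side:

    /* sketch: hypothetical name lookup for the new SMU11 syspll ids */
    static const char *smu11_syspll_name(enum atom_smu11_syspll_id id)
    {
            switch (id) {
            case SMU11_SYSPLL0_ID:   return "SYSPLL0";
            case SMU11_SYSPLL1_0_ID: return "SYSPLL1_0";
            case SMU11_SYSPLL1_1_ID: return "SYSPLL1_1";
            case SMU11_SYSPLL1_2_ID: return "SYSPLL1_2";
            case SMU11_SYSPLL2_ID:   return "SYSPLL2";
            case SMU11_SYSPLL3_0_ID: return "SYSPLL3_0";
            case SMU11_SYSPLL3_1_ID: return "SYSPLL3_1";
            default:                 return "unknown";
            }
    }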
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index f2814ae..a69deb3 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -42,20 +42,6 @@ enum cgs_ind_reg {
CGS_IND_REG__AUDIO_ENDPT
};
-/**
- * enum cgs_engine - Engines that can be statically power-gated
- */
-enum cgs_engine {
- CGS_ENGINE__UVD,
- CGS_ENGINE__VCE,
- CGS_ENGINE__VP8,
- CGS_ENGINE__ACP_DMA,
- CGS_ENGINE__ACP_DSP0,
- CGS_ENGINE__ACP_DSP1,
- CGS_ENGINE__ISP,
- /* ... */
-};
-
/*
* enum cgs_ucode_id - Firmware types for different IPs
*/
@@ -76,17 +62,6 @@ enum cgs_ucode_id {
CGS_UCODE_ID_MAXIMUM,
};
-/*
- * enum cgs_resource_type - GPU resource type
- */
-enum cgs_resource_type {
- CGS_RESOURCE_TYPE_MMIO = 0,
- CGS_RESOURCE_TYPE_FB,
- CGS_RESOURCE_TYPE_IO,
- CGS_RESOURCE_TYPE_DOORBELL,
- CGS_RESOURCE_TYPE_ROM,
-};
-
/**
* struct cgs_firmware_info - Firmware information
*/
@@ -104,17 +79,6 @@ struct cgs_firmware_info {
bool is_kicker;
};
-struct cgs_mode_info {
- uint32_t refresh_rate;
- uint32_t vblank_time_us;
-};
-
-struct cgs_display_info {
- uint32_t display_count;
- uint32_t active_display_mask;
- struct cgs_mode_info *mode_info;
-};
-
typedef unsigned long cgs_handle_t;
/**
@@ -170,119 +134,18 @@ typedef void (*cgs_write_ind_register_t)(struct cgs_device *cgs_device, enum cgs
#define CGS_WREG32_FIELD_IND(device, space, reg, field, val) \
cgs_write_ind_register(device, space, ix##reg, (cgs_read_ind_register(device, space, ix##reg) & ~CGS_REG_FIELD_MASK(reg, field)) | (val) << CGS_REG_FIELD_SHIFT(reg, field))
-/**
- * cgs_get_pci_resource() - provide access to a device resource (PCI BAR)
- * @cgs_device: opaque device handle
- * @resource_type: Type of Resource (MMIO, IO, ROM, FB, DOORBELL)
- * @size: size of the region
- * @offset: offset from the start of the region
- * @resource_base: base address (not including offset) returned
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_get_pci_resource_t)(struct cgs_device *cgs_device,
- enum cgs_resource_type resource_type,
- uint64_t size,
- uint64_t offset,
- uint64_t *resource_base);
-
-/**
- * cgs_atom_get_data_table() - Get a pointer to an ATOM BIOS data table
- * @cgs_device: opaque device handle
- * @table: data table index
- * @size: size of the table (output, may be NULL)
- * @frev: table format revision (output, may be NULL)
- * @crev: table content revision (output, may be NULL)
- *
- * Return: Pointer to start of the table, or NULL on failure
- */
-typedef const void *(*cgs_atom_get_data_table_t)(
- struct cgs_device *cgs_device, unsigned table,
- uint16_t *size, uint8_t *frev, uint8_t *crev);
-
-/**
- * cgs_atom_get_cmd_table_revs() - Get ATOM BIOS command table revisions
- * @cgs_device: opaque device handle
- * @table: data table index
- * @frev: table format revision (output, may be NULL)
- * @crev: table content revision (output, may be NULL)
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_atom_get_cmd_table_revs_t)(struct cgs_device *cgs_device, unsigned table,
- uint8_t *frev, uint8_t *crev);
-
-/**
- * cgs_atom_exec_cmd_table() - Execute an ATOM BIOS command table
- * @cgs_device: opaque device handle
- * @table: command table index
- * @args: arguments
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_atom_exec_cmd_table_t)(struct cgs_device *cgs_device,
- unsigned table, void *args);
-
-/**
- * cgs_get_firmware_info - Get the firmware information from core driver
- * @cgs_device: opaque device handle
- * @type: the firmware type
- * @info: returend firmware information
- *
- * Return: 0 on success, -errno otherwise
- */
typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info);
-typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device,
- enum cgs_ucode_id type);
-
-typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device,
- enum amd_ip_block_type block_type,
- enum amd_powergating_state state);
-
-typedef int(*cgs_set_clockgating_state)(struct cgs_device *cgs_device,
- enum amd_ip_block_type block_type,
- enum amd_clockgating_state state);
-
-typedef int(*cgs_get_active_displays_info)(
- struct cgs_device *cgs_device,
- struct cgs_display_info *info);
-
-typedef int (*cgs_notify_dpm_enabled)(struct cgs_device *cgs_device, bool enabled);
-
-typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device);
-
-typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en);
-
-typedef void (*cgs_lock_grbm_idx)(struct cgs_device *cgs_device, bool lock);
-
struct cgs_ops {
/* MMIO access */
cgs_read_register_t read_register;
cgs_write_register_t write_register;
cgs_read_ind_register_t read_ind_register;
cgs_write_ind_register_t write_ind_register;
- /* PCI resources */
- cgs_get_pci_resource_t get_pci_resource;
- /* ATOM BIOS */
- cgs_atom_get_data_table_t atom_get_data_table;
- cgs_atom_get_cmd_table_revs_t atom_get_cmd_table_revs;
- cgs_atom_exec_cmd_table_t atom_exec_cmd_table;
/* Firmware Info */
cgs_get_firmware_info get_firmware_info;
- cgs_rel_firmware rel_firmware;
- /* cg pg interface*/
- cgs_set_powergating_state set_powergating_state;
- cgs_set_clockgating_state set_clockgating_state;
- /* display manager */
- cgs_get_active_displays_info get_active_displays_info;
- /* notify dpm enabled */
- cgs_notify_dpm_enabled notify_dpm_enabled;
- cgs_is_virtualization_enabled_t is_virtualization_enabled;
- cgs_enter_safe_mode enter_safe_mode;
- cgs_lock_grbm_idx lock_grbm_idx;
};
struct cgs_os_ops; /* To be defined in OS-specific CGS header */
@@ -309,40 +172,7 @@ struct cgs_device
#define cgs_write_ind_register(dev,space,index,value) \
CGS_CALL(write_ind_register,dev,space,index,value)
-#define cgs_atom_get_data_table(dev,table,size,frev,crev) \
- CGS_CALL(atom_get_data_table,dev,table,size,frev,crev)
-#define cgs_atom_get_cmd_table_revs(dev,table,frev,crev) \
- CGS_CALL(atom_get_cmd_table_revs,dev,table,frev,crev)
-#define cgs_atom_exec_cmd_table(dev,table,args) \
- CGS_CALL(atom_exec_cmd_table,dev,table,args)
-
#define cgs_get_firmware_info(dev, type, info) \
CGS_CALL(get_firmware_info, dev, type, info)
-#define cgs_rel_firmware(dev, type) \
- CGS_CALL(rel_firmware, dev, type)
-#define cgs_set_powergating_state(dev, block_type, state) \
- CGS_CALL(set_powergating_state, dev, block_type, state)
-#define cgs_set_clockgating_state(dev, block_type, state) \
- CGS_CALL(set_clockgating_state, dev, block_type, state)
-#define cgs_notify_dpm_enabled(dev, enabled) \
- CGS_CALL(notify_dpm_enabled, dev, enabled)
-
-#define cgs_get_active_displays_info(dev, info) \
- CGS_CALL(get_active_displays_info, dev, info)
-
-#define cgs_get_pci_resource(cgs_device, resource_type, size, offset, \
- resource_base) \
- CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \
- resource_base)
-
-#define cgs_is_virtualization_enabled(cgs_device) \
- CGS_CALL(is_virtualization_enabled, cgs_device)
-
-#define cgs_enter_safe_mode(cgs_device, en) \
- CGS_CALL(enter_safe_mode, cgs_device, en)
-
-#define cgs_lock_grbm_idx(cgs_device, lock) \
- CGS_CALL(lock_grbm_idx, cgs_device, lock)
-
#endif /* _CGS_COMMON_H */
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 5c840c0..06f08f3 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -94,6 +94,7 @@ enum pp_clock_type {
PP_PCIE,
OD_SCLK,
OD_MCLK,
+ OD_RANGE,
};
enum amd_pp_sensors {
@@ -149,13 +150,6 @@ struct pp_states_info {
uint32_t states[16];
};
-struct pp_gpu_power {
- uint32_t vddc_power;
- uint32_t vddci_power;
- uint32_t max_gpu_power;
- uint32_t average_gpu_power;
-};
-
#define PP_GROUP_MASK 0xF0000000
#define PP_GROUP_SHIFT 28
@@ -246,11 +240,6 @@ struct amd_pm_funcs {
int (*load_firmware)(void *handle);
int (*wait_for_fw_loading_complete)(void *handle);
int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
- int (*notify_smu_memory_info)(void *handle, uint32_t virtual_addr_low,
- uint32_t virtual_addr_hi,
- uint32_t mc_addr_low,
- uint32_t mc_addr_hi,
- uint32_t size);
int (*set_power_limit)(void *handle, uint32_t n);
int (*get_power_limit)(void *handle, uint32_t *limit, bool default_limit);
/* export to DC */
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 7e8ad30..b493369 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -25,30 +25,16 @@
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/slab.h>
+#include <linux/firmware.h>
#include "amd_shared.h"
#include "amd_powerplay.h"
#include "power_state.h"
#include "amdgpu.h"
#include "hwmgr.h"
-#define PP_DPM_DISABLED 0xCCCC
-
-static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
- enum amd_pm_state_type *user_state);
static const struct amd_pm_funcs pp_dpm_funcs;
-static inline int pp_check(struct pp_hwmgr *hwmgr)
-{
- if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL)
- return -EINVAL;
-
- if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL)
- return PP_DPM_DISABLED;
-
- return 0;
-}
-
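
With pp_check() and its PP_DPM_DISABLED magic value gone, every entry point below open-codes the same two-part guard, and the tri-state return callers used to interpret collapses into an ordinary error code. The resulting shape, sketched:

    /* sketch: the guard each pp_dpm_* callback now carries inline */
    static int pp_dpm_example(void *handle)
    {
            struct pp_hwmgr *hwmgr = handle;

            if (!hwmgr || !hwmgr->pm_en)   /* no hwmgr, or DPM disabled */
                    return -EINVAL;

            /* ... body runs only with power management enabled ... */
            return 0;
    }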
static int amd_powerplay_create(struct amdgpu_device *adev)
{
struct pp_hwmgr *hwmgr;
@@ -61,19 +47,21 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
return -ENOMEM;
hwmgr->adev = adev;
- hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
+ hwmgr->not_vf = !amdgpu_sriov_vf(adev);
+ hwmgr->pm_en = amdgpu_dpm && hwmgr->not_vf;
hwmgr->device = amdgpu_cgs_create_device(adev);
mutex_init(&hwmgr->smu_lock);
hwmgr->chip_family = adev->family;
hwmgr->chip_id = adev->asic_type;
- hwmgr->feature_mask = amdgpu_pp_feature_mask;
+ hwmgr->feature_mask = adev->powerplay.pp_feature;
+ hwmgr->display_config = &adev->pm.pm_display_cfg;
adev->powerplay.pp_handle = hwmgr;
adev->powerplay.pp_funcs = &pp_dpm_funcs;
return 0;
}
-static int amd_powerplay_destroy(struct amdgpu_device *adev)
+static void amd_powerplay_destroy(struct amdgpu_device *adev)
{
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
@@ -82,8 +70,6 @@ static int amd_powerplay_destroy(struct amdgpu_device *adev)
kfree(hwmgr);
hwmgr = NULL;
-
- return 0;
}
static int pp_early_init(void *handle)
@@ -109,18 +95,9 @@ static int pp_sw_init(void *handle)
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret = 0;
- ret = pp_check(hwmgr);
-
- if (ret >= 0) {
- if (hwmgr->smumgr_funcs->smu_init == NULL)
- return -EINVAL;
-
- ret = hwmgr->smumgr_funcs->smu_init(hwmgr);
+ ret = hwmgr_sw_init(hwmgr);
- phm_register_irq_handlers(hwmgr);
-
- pr_debug("amdgpu: powerplay sw initialized\n");
- }
+ pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
return ret;
}
@@ -129,16 +106,14 @@ static int pp_sw_fini(void *handle)
{
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- int ret = 0;
- ret = pp_check(hwmgr);
- if (ret >= 0) {
- if (hwmgr->smumgr_funcs->smu_fini != NULL)
- hwmgr->smumgr_funcs->smu_fini(hwmgr);
- }
+ hwmgr_sw_fini(hwmgr);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
amdgpu_ucode_fini_bo(adev);
+ }
return 0;
}
@@ -152,55 +127,76 @@ static int pp_hw_init(void *handle)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
amdgpu_ucode_init_bo(adev);
- ret = pp_check(hwmgr);
+ ret = hwmgr_hw_init(hwmgr);
- if (ret >= 0) {
- if (hwmgr->smumgr_funcs->start_smu == NULL)
- return -EINVAL;
+ if (ret)
+ pr_err("powerplay hw init failed\n");
- if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
- pr_err("smc start failed\n");
- hwmgr->smumgr_funcs->smu_fini(hwmgr);
- return -EINVAL;
- }
- if (ret == PP_DPM_DISABLED)
- goto exit;
- ret = hwmgr_hw_init(hwmgr);
- if (ret)
- goto exit;
- }
return ret;
-exit:
- hwmgr->pm_en = 0;
- cgs_notify_dpm_enabled(hwmgr->device, false);
- return 0;
-
}
static int pp_hw_fini(void *handle)
{
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- int ret = 0;
- ret = pp_check(hwmgr);
- if (ret == 0)
- hwmgr_hw_fini(hwmgr);
+ hwmgr_hw_fini(hwmgr);
return 0;
}
+static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
+{
+ int r = -EINVAL;
+ void *cpu_ptr = NULL;
+ uint64_t gpu_addr;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+ if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+ &adev->pm.smu_prv_buffer,
+ &gpu_addr,
+ &cpu_ptr)) {
+ DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
+ return;
+ }
+
+ if (hwmgr->hwmgr_func->notify_cac_buffer_info)
+ r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
+ lower_32_bits((unsigned long)cpu_ptr),
+ upper_32_bits((unsigned long)cpu_ptr),
+ lower_32_bits(gpu_addr),
+ upper_32_bits(gpu_addr),
+ adev->pm.smu_prv_buffer_size);
+
+ if (r) {
+ amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
+ adev->pm.smu_prv_buffer = NULL;
+ DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
+ }
+}
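
Since the SMU mailbox takes 32-bit arguments, the helper above slices both the kernel virtual address and the GPU address of the reserved buffer into lo/hi halves with lower_32_bits()/upper_32_bits(). The same split in isolation:

    #include <linux/kernel.h>   /* lower_32_bits(), upper_32_bits() */

    /* sketch: split a 64-bit address into the two 32-bit mailbox words */
    static void split_mailbox_addr(u64 addr, u32 *lo, u32 *hi)
    {
            *lo = lower_32_bits(addr);
            *hi = upper_32_bits(addr);
    }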
+
static int pp_late_init(void *handle)
{
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- int ret = 0;
-
- ret = pp_check(hwmgr);
+ int ret;
- if (ret == 0)
- pp_dpm_dispatch_tasks(hwmgr,
+ if (hwmgr && hwmgr->pm_en) {
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr_handle_task(hwmgr,
AMD_PP_TASK_COMPLETE_INIT, NULL);
+ mutex_unlock(&hwmgr->smu_lock);
+ }
+ if (adev->pm.smu_prv_buffer_size != 0)
+ pp_reserve_vram_for_smu(adev);
+
+ if (hwmgr->hwmgr_func->gfx_off_control &&
+ (hwmgr->feature_mask & PP_GFXOFF_MASK)) {
+ ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr, true);
+ if (ret)
+ pr_err("gfx off enabling failed!\n");
+ }
return 0;
}
@@ -209,6 +205,8 @@ static void pp_late_fini(void *handle)
{
struct amdgpu_device *adev = handle;
+ if (adev->pm.smu_prv_buffer)
+ amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
amd_powerplay_destroy(adev);
}
@@ -233,12 +231,18 @@ static int pp_set_powergating_state(void *handle,
{
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- int ret = 0;
+ int ret;
- ret = pp_check(hwmgr);
+ if (!hwmgr || !hwmgr->pm_en)
+ return 0;
- if (ret)
- return ret;
+ if (hwmgr->hwmgr_func->gfx_off_control) {
+ /* Enable/disable GFX off through SMU */
+ ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr,
+ state == AMD_PG_STATE_GATE);
+ if (ret)
+ pr_err("gfx off control failed!\n");
+ }
if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
pr_info("%s was not implemented.\n", __func__);
@@ -254,38 +258,16 @@ static int pp_suspend(void *handle)
{
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- int ret = 0;
- ret = pp_check(hwmgr);
- if (ret == 0)
- hwmgr_hw_suspend(hwmgr);
- return 0;
+ return hwmgr_suspend(hwmgr);
}
static int pp_resume(void *handle)
{
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- int ret;
-
- ret = pp_check(hwmgr);
-
- if (ret < 0)
- return ret;
-
- if (hwmgr->smumgr_funcs->start_smu == NULL)
- return -EINVAL;
-
- if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
- pr_err("smc start failed\n");
- hwmgr->smumgr_funcs->smu_fini(hwmgr);
- return -EINVAL;
- }
-
- if (ret == PP_DPM_DISABLED)
- return 0;
- return hwmgr_hw_resume(hwmgr);
+ return hwmgr_resume(hwmgr);
}
static int pp_set_clockgating_state(void *handle,
@@ -334,12 +316,9 @@ static int pp_dpm_fw_loading_complete(void *handle)
static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
-
- ret = pp_check(hwmgr);
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
pr_info("%s was not implemented.\n", __func__);
@@ -362,10 +341,10 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
if (*level & profile_mode_mask) {
hwmgr->saved_dpm_level = hwmgr->dpm_level;
hwmgr->en_umd_pstate = true;
- cgs_set_clockgating_state(hwmgr->device,
+ amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_CG_STATE_UNGATE);
- cgs_set_powergating_state(hwmgr->device,
+ amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_PG_STATE_UNGATE);
}
@@ -375,10 +354,10 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
*level = hwmgr->saved_dpm_level;
hwmgr->en_umd_pstate = false;
- cgs_set_clockgating_state(hwmgr->device,
+ amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_CG_STATE_GATE);
- cgs_set_powergating_state(hwmgr->device,
+ amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_PG_STATE_GATE);
}
@@ -389,12 +368,9 @@ static int pp_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
if (level == hwmgr->dpm_level)
return 0;
@@ -412,13 +388,10 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
enum amd_dpm_forced_level level;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
level = hwmgr->dpm_level;
@@ -429,13 +402,10 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
uint32_t clk = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return 0;
if (hwmgr->hwmgr_func->get_sclk == NULL) {
pr_info("%s was not implemented.\n", __func__);
@@ -450,13 +420,10 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
uint32_t clk = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return 0;
if (hwmgr->hwmgr_func->get_mclk == NULL) {
pr_info("%s was not implemented.\n", __func__);
@@ -471,11 +438,8 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
static void pp_dpm_powergate_vce(void *handle, bool gate)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
+ if (!hwmgr || !hwmgr->pm_en)
return;
if (hwmgr->hwmgr_func->powergate_vce == NULL) {
@@ -490,11 +454,8 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
-
- ret = pp_check(hwmgr);
- if (ret)
+ if (!hwmgr || !hwmgr->pm_en)
return;
if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
@@ -512,10 +473,8 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
int ret = 0;
struct pp_hwmgr *hwmgr = handle;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
ret = hwmgr_handle_task(hwmgr, task_id, user_state);
@@ -528,15 +487,9 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
struct pp_power_state *state;
- int ret = 0;
enum amd_pm_state_type pm_type;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
-
- if (hwmgr->current_ps == NULL)
+ if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
@@ -568,11 +521,8 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
-
- ret = pp_check(hwmgr);
- if (ret)
+ if (!hwmgr || !hwmgr->pm_en)
return;
if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
@@ -587,13 +537,10 @@ static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
static uint32_t pp_dpm_get_fan_control_mode(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
uint32_t mode = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return 0;
if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
pr_info("%s was not implemented.\n", __func__);
@@ -610,10 +557,8 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
pr_info("%s was not implemented.\n", __func__);
@@ -630,10 +575,8 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
pr_info("%s was not implemented.\n", __func__);
@@ -651,10 +594,8 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
return -EINVAL;
@@ -670,16 +611,10 @@ static int pp_dpm_get_pp_num_states(void *handle,
{
struct pp_hwmgr *hwmgr = handle;
int i;
- int ret = 0;
memset(data, 0, sizeof(*data));
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
-
- if (hwmgr->ps == NULL)
+ if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
@@ -713,15 +648,9 @@ static int pp_dpm_get_pp_num_states(void *handle,
static int pp_dpm_get_pp_table(void *handle, char **table)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
int size = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
-
- if (!hwmgr->soft_pp_table)
+ if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
@@ -736,10 +665,6 @@ static int amd_powerplay_reset(void *handle)
struct pp_hwmgr *hwmgr = handle;
int ret;
- ret = pp_check(hwmgr);
- if (ret)
- return ret;
-
ret = hwmgr_hw_fini(hwmgr);
if (ret)
return ret;
@@ -754,40 +679,38 @@ static int amd_powerplay_reset(void *handle)
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
+ int ret = -ENOMEM;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
if (!hwmgr->hardcode_pp_table) {
hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
hwmgr->soft_pp_table_size,
GFP_KERNEL);
- if (!hwmgr->hardcode_pp_table) {
- mutex_unlock(&hwmgr->smu_lock);
- return -ENOMEM;
- }
+ if (!hwmgr->hardcode_pp_table)
+ goto err;
}
memcpy(hwmgr->hardcode_pp_table, buf, size);
hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
- mutex_unlock(&hwmgr->smu_lock);
ret = amd_powerplay_reset(handle);
if (ret)
- return ret;
+ goto err;
if (hwmgr->hwmgr_func->avfs_control) {
ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
if (ret)
- return ret;
+ goto err;
}
-
+ mutex_unlock(&hwmgr->smu_lock);
return 0;
+err:
+ mutex_unlock(&hwmgr->smu_lock);
+ return ret;
}
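
The reworked pp_dpm_set_pp_table() also fixes the lock scope: amd_powerplay_reset() used to run after smu_lock was already dropped, and each failure path unlocked by hand. Funnelling every exit through one err label keeps the whole table update under the lock with a single unlock site. The shape of that idiom, reduced (types and field names assumed from the surrounding code):

    /* sketch: single-exit locking so all failure paths unlock once */
    static int locked_update(struct pp_hwmgr *hwmgr)
    {
            int ret = -ENOMEM;

            mutex_lock(&hwmgr->smu_lock);
            if (!hwmgr->hardcode_pp_table)
                    goto err;   /* e.g. an allocation failed */

            /* ... modify tables, reset hardware, all still under the lock ... */
            ret = 0;
    err:
            mutex_unlock(&hwmgr->smu_lock);
            return ret;
    }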
static int pp_dpm_force_clock_level(void *handle,
@@ -796,10 +719,8 @@ static int pp_dpm_force_clock_level(void *handle,
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
if (hwmgr->hwmgr_func->force_clock_level == NULL) {
pr_info("%s was not implemented.\n", __func__);
@@ -820,10 +741,8 @@ static int pp_dpm_print_clock_levels(void *handle,
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
pr_info("%s was not implemented.\n", __func__);
@@ -840,10 +759,8 @@ static int pp_dpm_get_sclk_od(void *handle)
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
pr_info("%s was not implemented.\n", __func__);
@@ -860,10 +777,8 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
pr_info("%s was not implemented.\n", __func__);
@@ -881,10 +796,8 @@ static int pp_dpm_get_mclk_od(void *handle)
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
pr_info("%s was not implemented.\n", __func__);
@@ -901,10 +814,8 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
pr_info("%s was not implemented.\n", __func__);
@@ -922,11 +833,7 @@ static int pp_dpm_read_sensor(void *handle, int idx,
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(hwmgr);
- if (ret)
- return ret;
-
- if (value == NULL)
+ if (!hwmgr || !hwmgr->pm_en || !value)
return -EINVAL;
switch (idx) {
@@ -948,14 +855,11 @@ static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
-
- ret = pp_check(hwmgr);
- if (ret)
+ if (!hwmgr || !hwmgr->pm_en)
return NULL;
- if (hwmgr && idx < hwmgr->num_vce_state_tables)
+ if (idx < hwmgr->num_vce_state_tables)
return &hwmgr->vce_states[idx];
return NULL;
}
@@ -964,7 +868,7 @@ static int pp_get_power_profile_mode(void *handle, char *buf)
{
struct pp_hwmgr *hwmgr = handle;
- if (!buf || pp_check(hwmgr))
+ if (!hwmgr || !hwmgr->pm_en || !buf)
return -EINVAL;
if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
@@ -980,12 +884,12 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
struct pp_hwmgr *hwmgr = handle;
int ret = -EINVAL;
- if (pp_check(hwmgr))
- return -EINVAL;
+ if (!hwmgr || !hwmgr->pm_en)
+ return ret;
if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
pr_info("%s was not implemented.\n", __func__);
- return -EINVAL;
+ return ret;
}
mutex_lock(&hwmgr->smu_lock);
if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
@@ -998,7 +902,7 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
{
struct pp_hwmgr *hwmgr = handle;
- if (pp_check(hwmgr))
+ if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
@@ -1016,7 +920,7 @@ static int pp_dpm_switch_power_profile(void *handle,
long workload;
uint32_t index;
- if (pp_check(hwmgr))
+ if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
@@ -1048,46 +952,12 @@ static int pp_dpm_switch_power_profile(void *handle,
return 0;
}
-static int pp_dpm_notify_smu_memory_info(void *handle,
- uint32_t virtual_addr_low,
- uint32_t virtual_addr_hi,
- uint32_t mc_addr_low,
- uint32_t mc_addr_hi,
- uint32_t size)
-{
- struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
-
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
-
- if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) {
- pr_info("%s was not implemented.\n", __func__);
- return -EINVAL;
- }
-
- mutex_lock(&hwmgr->smu_lock);
-
- ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low,
- virtual_addr_hi, mc_addr_low, mc_addr_hi,
- size);
-
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
-}
-
static int pp_set_power_limit(void *handle, uint32_t limit)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
if (hwmgr->hwmgr_func->set_power_limit == NULL) {
pr_info("%s was not implemented.\n", __func__);
@@ -1104,20 +974,14 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
hwmgr->power_limit = limit;
mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return 0;
}
static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
-
- ret = pp_check(hwmgr);
- if (ret)
- return ret;
-
- if (limit == NULL)
+ if (!hwmgr || !hwmgr->pm_en || !limit)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
@@ -1129,19 +993,16 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return 0;
}
static int pp_display_configuration_change(void *handle,
const struct amd_pp_display_configuration *display_config)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
phm_store_dal_configuration_data(hwmgr, display_config);
@@ -1155,12 +1016,7 @@ static int pp_get_display_power_level(void *handle,
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
-
- if (output == NULL)
+ if (!hwmgr || !hwmgr->pm_en || !output)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
@@ -1177,10 +1033,8 @@ static int pp_get_current_clocks(void *handle,
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
@@ -1225,10 +1079,8 @@ static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struc
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
if (clocks == NULL)
return -EINVAL;
@@ -1246,11 +1098,7 @@ static int pp_get_clock_by_type_with_latency(void *handle,
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(hwmgr);
- if (ret)
- return ret;
-
- if (!clocks)
+ if (!hwmgr || !hwmgr->pm_en || !clocks)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
@@ -1266,11 +1114,7 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(hwmgr);
- if (ret)
- return ret;
-
- if (!clocks)
+ if (!hwmgr || !hwmgr->pm_en || !clocks)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
@@ -1287,11 +1131,7 @@ static int pp_set_watermarks_for_clocks_ranges(void *handle,
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(hwmgr);
- if (ret)
- return ret;
-
- if (!wm_with_clock_ranges)
+ if (!hwmgr || !hwmgr->pm_en || !wm_with_clock_ranges)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
@@ -1308,11 +1148,7 @@ static int pp_display_clock_voltage_request(void *handle,
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(hwmgr);
- if (ret)
- return ret;
-
- if (!clock)
+ if (!hwmgr || !hwmgr->pm_en || !clock)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
@@ -1328,12 +1164,7 @@ static int pp_get_display_mode_validation_clocks(void *handle,
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
-
- if (clocks == NULL)
+ if (!hwmgr || !hwmgr->pm_en || !clocks)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
@@ -1348,12 +1179,9 @@ static int pp_get_display_mode_validation_clocks(void *handle,
static int pp_set_mmhub_powergating_by_smu(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
- ret = pp_check(hwmgr);
-
- if (ret)
- return ret;
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
pr_info("%s was not implemented.\n", __func__);
@@ -1390,7 +1218,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.get_vce_clock_state = pp_dpm_get_vce_clock_state,
.switch_power_profile = pp_dpm_switch_power_profile,
.set_clockgating_by_smu = pp_set_clockgating_by_smu,
- .notify_smu_memory_info = pp_dpm_notify_smu_memory_info,
.get_power_profile_mode = pp_get_power_profile_mode,
.set_power_profile_mode = pp_set_power_profile_mode,
.odn_edit_dpm_table = pp_odn_edit_dpm_table,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index ae2e933..e411012 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -75,8 +75,7 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
- int ret = 1;
- bool enabled;
+ int ret = -EINVAL;
PHM_FUNC_CHECK(hwmgr);
if (smum_is_dpm_running(hwmgr)) {
@@ -87,17 +86,12 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
- enabled = ret == 0;
-
- cgs_notify_dpm_enabled(hwmgr->device, enabled);
-
return ret;
}
int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
- int ret = -1;
- bool enabled;
+ int ret = -EINVAL;
PHM_FUNC_CHECK(hwmgr);
@@ -109,10 +103,6 @@ int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
if (hwmgr->hwmgr_func->dynamic_state_management_disable)
ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
- enabled = ret == 0 ? false : true;
-
- cgs_notify_dpm_enabled(hwmgr->device, enabled);
-
return ret;
}
@@ -275,13 +265,11 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
if (display_config == NULL)
return -EINVAL;
- hwmgr->display_config = *display_config;
-
if (NULL != hwmgr->hwmgr_func->set_deep_sleep_dcefclk)
- hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, hwmgr->display_config.min_dcef_deep_sleep_set_clk);
+ hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);
- for (index = 0; index < hwmgr->display_config.num_path_including_non_display; index++) {
- if (hwmgr->display_config.displays[index].controller_id != 0)
+ for (index = 0; index < display_config->num_path_including_non_display; index++) {
+ if (display_config->displays[index].controller_id != 0)
number_of_active_display++;
}
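With the cgs_notify_dpm_enabled() round-trip gone, phm_enable_dynamic_state_management() reports the backend result directly and defaults to -EINVAL when no hook is wired up. The function after this hunk, reconstructed from the context lines (the body of the smum_is_dpm_running() branch is not shown in the hunk and is assumed here):

int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
	int ret = -EINVAL;	/* returned as-is if the backend hook is absent */

	PHM_FUNC_CHECK(hwmgr);

	if (smum_is_dpm_running(hwmgr))
		return 0;	/* assumed early-out: DPM is already active */

	if (hwmgr->hwmgr_func->dynamic_state_management_enable)
		ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);

	return ret;
}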
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 4298205..71b4233 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -40,6 +40,7 @@ extern const struct pp_smumgr_func iceland_smu_funcs;
extern const struct pp_smumgr_func tonga_smu_funcs;
extern const struct pp_smumgr_func fiji_smu_funcs;
extern const struct pp_smumgr_func polaris10_smu_funcs;
+extern const struct pp_smumgr_func vegam_smu_funcs;
extern const struct pp_smumgr_func vega10_smu_funcs;
extern const struct pp_smumgr_func vega12_smu_funcs;
extern const struct pp_smumgr_func smu10_smu_funcs;
@@ -76,7 +77,7 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
int hwmgr_early_init(struct pp_hwmgr *hwmgr)
{
- if (hwmgr == NULL)
+ if (!hwmgr)
return -EINVAL;
hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
@@ -95,7 +96,8 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
hwmgr->smumgr_funcs = &ci_smu_funcs;
ci_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
- PP_ENABLE_GFX_CG_THRU_SMU);
+ PP_ENABLE_GFX_CG_THRU_SMU |
+ PP_GFXOFF_MASK);
hwmgr->pp_table_version = PP_TABLE_V0;
hwmgr->od_enabled = false;
smu7_init_function_pointers(hwmgr);
@@ -103,9 +105,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
case AMDGPU_FAMILY_CZ:
hwmgr->od_enabled = false;
hwmgr->smumgr_funcs = &smu8_smu_funcs;
+ hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
smu8_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_VI:
+ hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
switch (hwmgr->chip_id) {
case CHIP_TOPAZ:
hwmgr->smumgr_funcs = &iceland_smu_funcs;
@@ -133,12 +137,18 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
polaris_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
break;
+ case CHIP_VEGAM:
+ hwmgr->smumgr_funcs = &vegam_smu_funcs;
+ polaris_set_asic_special_caps(hwmgr);
+ hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
+ break;
default:
return -EINVAL;
}
smu7_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_AI:
+ hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
switch (hwmgr->chip_id) {
case CHIP_VEGA10:
hwmgr->smumgr_funcs = &vega10_smu_funcs;
@@ -170,22 +180,58 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
return 0;
}
+int hwmgr_sw_init(struct pp_hwmgr *hwmgr)
+{
+ if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->smu_init)
+ return -EINVAL;
+
+ phm_register_irq_handlers(hwmgr);
+
+ return hwmgr->smumgr_funcs->smu_init(hwmgr);
+}
+
+int hwmgr_sw_fini(struct pp_hwmgr *hwmgr)
+{
+ if (hwmgr && hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->smu_fini)
+ hwmgr->smumgr_funcs->smu_fini(hwmgr);
+
+ return 0;
+}
+
int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
{
int ret = 0;
- if (hwmgr == NULL)
+ if (!hwmgr || !hwmgr->smumgr_funcs)
return -EINVAL;
- if (hwmgr->pptable_func == NULL ||
- hwmgr->pptable_func->pptable_init == NULL ||
- hwmgr->hwmgr_func->backend_init == NULL)
- return -EINVAL;
+ if (hwmgr->smumgr_funcs->start_smu) {
+ ret = hwmgr->smumgr_funcs->start_smu(hwmgr);
+ if (ret) {
+ pr_err("smc start failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!hwmgr->pm_en)
+ return 0;
+
+ if (!hwmgr->pptable_func ||
+ !hwmgr->pptable_func->pptable_init ||
+ !hwmgr->hwmgr_func->backend_init) {
+ hwmgr->pm_en = false;
+ pr_info("dpm not supported\n");
+ return 0;
+ }
ret = hwmgr->pptable_func->pptable_init(hwmgr);
if (ret)
goto err;
+ ((struct amdgpu_device *)hwmgr->adev)->pm.no_fan =
+ hwmgr->thermal_controller.fanInfo.bNoFan;
+
ret = hwmgr->hwmgr_func->backend_init(hwmgr);
if (ret)
goto err1;
@@ -206,6 +252,8 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
if (ret)
goto err2;
+ ((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = true;
+
return 0;
err2:
if (hwmgr->hwmgr_func->backend_fini)
@@ -214,14 +262,13 @@ err1:
if (hwmgr->pptable_func->pptable_fini)
hwmgr->pptable_func->pptable_fini(hwmgr);
err:
- pr_err("amdgpu: powerplay initialization failed\n");
return ret;
}
int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
{
- if (hwmgr == NULL)
- return -EINVAL;
+ if (!hwmgr || !hwmgr->pm_en)
+ return 0;
phm_stop_thermal_controller(hwmgr);
psm_set_boot_states(hwmgr);
@@ -236,12 +283,12 @@ int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
return psm_fini_power_state_table(hwmgr);
}
-int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr)
+int hwmgr_suspend(struct pp_hwmgr *hwmgr)
{
int ret = 0;
- if (hwmgr == NULL)
- return -EINVAL;
+ if (!hwmgr || !hwmgr->pm_en)
+ return 0;
phm_disable_smc_firmware_ctf(hwmgr);
ret = psm_set_boot_states(hwmgr);
@@ -255,13 +302,23 @@ int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr)
return ret;
}
-int hwmgr_hw_resume(struct pp_hwmgr *hwmgr)
+int hwmgr_resume(struct pp_hwmgr *hwmgr)
{
int ret = 0;
- if (hwmgr == NULL)
+ if (!hwmgr)
return -EINVAL;
+ if (hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->start_smu) {
+ if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
+ pr_err("smc start failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!hwmgr->pm_en)
+ return 0;
+
ret = phm_setup_asic(hwmgr);
if (ret)
return ret;
@@ -270,9 +327,6 @@ int hwmgr_hw_resume(struct pp_hwmgr *hwmgr)
if (ret)
return ret;
ret = phm_start_thermal_controller(hwmgr);
- if (ret)
- return ret;
-
ret |= psm_set_performance_states(hwmgr);
if (ret)
return ret;
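Taken together, the hwmgr.c hunks split bring-up into three ordered stages. A hedged sketch of the sequence a caller would follow (the wrapper name is illustrative; in practice amdgpu's powerplay IP hooks drive these):

static int pp_bringup(struct pp_hwmgr *hwmgr)
{
	int ret;

	ret = hwmgr_early_init(hwmgr);	/* pick smumgr/hwmgr callbacks per ASIC */
	if (ret)
		return ret;

	ret = hwmgr_sw_init(hwmgr);	/* IRQ handlers + smumgr_funcs->smu_init() */
	if (ret)
		return ret;

	/* starts the SMC first; if DPM is unsupported, pm_en is cleared,
	 * "dpm not supported" is logged, and 0 is still returned */
	return hwmgr_hw_init(hwmgr);
}

The teardown side mirrors this leniency: hwmgr_hw_fini() and the renamed hwmgr_suspend() now return 0 rather than -EINVAL when pm_en is clear, so a device without DPM suspends and tears down cleanly.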
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index 0f2851b..308bff2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -46,7 +46,7 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
sizeof(struct pp_power_state);
if (table_entries == 0 || size == 0) {
- pr_warn("Please check whether power state management is suppported on this asic\n");
+ pr_warn("Please check whether power state management is supported on this asic\n");
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index c6febbf..cf99c5e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -23,7 +23,8 @@
#include "pp_debug.h"
#include <linux/module.h>
#include <linux/slab.h>
-
+#include <linux/delay.h>
+#include "atom.h"
#include "ppatomctrl.h"
#include "atombios.h"
#include "cgs_common.h"
@@ -128,7 +129,6 @@ static int atomctrl_set_mc_reg_address_table(
return 0;
}
-
int atomctrl_initialize_mc_reg_table(
struct pp_hwmgr *hwmgr,
uint8_t module_index,
@@ -141,7 +141,7 @@ int atomctrl_initialize_mc_reg_table(
u16 size;
vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *)
- cgs_atom_get_data_table(hwmgr->device,
+ smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev);
if (module_index >= vram_info->ucNumOfVRAMModule) {
@@ -174,6 +174,8 @@ int atomctrl_set_engine_dram_timings_rv770(
uint32_t engine_clock,
uint32_t memory_clock)
{
+ struct amdgpu_device *adev = hwmgr->adev;
+
SET_ENGINE_CLOCK_PS_ALLOCATION engine_clock_parameters;
/* They are both in 10KHz Units. */
@@ -184,9 +186,10 @@ int atomctrl_set_engine_dram_timings_rv770(
/* in 10 khz units.*/
engine_clock_parameters.sReserved.ulClock =
cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK);
- return cgs_atom_exec_cmd_table(hwmgr->device,
+
+ return amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
- &engine_clock_parameters);
+ (uint32_t *)&engine_clock_parameters);
}
/**
@@ -203,7 +206,7 @@ static ATOM_VOLTAGE_OBJECT_INFO *get_voltage_info_table(void *device)
union voltage_object_info *voltage_info;
voltage_info = (union voltage_object_info *)
- cgs_atom_get_data_table(device, index,
+ smu_atom_get_data_table(device, index,
&size, &frev, &crev);
if (voltage_info != NULL)
@@ -247,16 +250,16 @@ int atomctrl_get_memory_pll_dividers_si(
pp_atomctrl_memory_clock_param *mpll_param,
bool strobe_mode)
{
+ struct amdgpu_device *adev = hwmgr->adev;
COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters;
int result;
mpll_parameters.ulClock = cpu_to_le32(clock_value);
mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0);
- result = cgs_atom_exec_cmd_table
- (hwmgr->device,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
- &mpll_parameters);
+ (uint32_t *)&mpll_parameters);
if (0 == result) {
mpll_param->mpll_fb_divider.clk_frac =
@@ -295,14 +298,15 @@ int atomctrl_get_memory_pll_dividers_si(
int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param)
{
+ struct amdgpu_device *adev = hwmgr->adev;
COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters;
int result;
mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
- result = cgs_atom_exec_cmd_table(hwmgr->device,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
- &mpll_parameters);
+ (uint32_t *)&mpll_parameters);
if (!result)
mpll_param->mpll_post_divider =
@@ -311,19 +315,49 @@ int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
return result;
}
+int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
+ uint32_t clock_value,
+ pp_atomctrl_memory_clock_param_ai *mpll_param)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {0};
+ int result;
+
+ mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
+
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
+ (uint32_t *)&mpll_parameters);
+
+ /* VEGAM's mpll takes some time to finish computing */
+ udelay(10);
+
+ if (!result) {
+ mpll_param->ulMclk_fcw_int =
+ le16_to_cpu(mpll_parameters.usMclk_fcw_int);
+ mpll_param->ulMclk_fcw_frac =
+ le16_to_cpu(mpll_parameters.usMclk_fcw_frac);
+ mpll_param->ulClock =
+ le32_to_cpu(mpll_parameters.ulClock.ulClock);
+ mpll_param->ulPostDiv = mpll_parameters.ulClock.ucPostDiv;
+ }
+
+ return result;
+}
+
int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
uint32_t clock_value,
pp_atomctrl_clock_dividers_kong *dividers)
{
+ struct amdgpu_device *adev = hwmgr->adev;
COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters;
int result;
pll_parameters.ulClock = cpu_to_le32(clock_value);
- result = cgs_atom_exec_cmd_table
- (hwmgr->device,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
- &pll_parameters);
+ (uint32_t *)&pll_parameters);
if (0 == result) {
dividers->pll_post_divider = pll_parameters.ucPostDiv;
@@ -338,16 +372,16 @@ int atomctrl_get_engine_pll_dividers_vi(
uint32_t clock_value,
pp_atomctrl_clock_dividers_vi *dividers)
{
+ struct amdgpu_device *adev = hwmgr->adev;
COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
int result;
pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
- result = cgs_atom_exec_cmd_table
- (hwmgr->device,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
- &pll_patameters);
+ (uint32_t *)&pll_patameters);
if (0 == result) {
dividers->pll_post_divider =
@@ -375,16 +409,16 @@ int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
uint32_t clock_value,
pp_atomctrl_clock_dividers_ai *dividers)
{
+ struct amdgpu_device *adev = hwmgr->adev;
COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters;
int result;
pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
- result = cgs_atom_exec_cmd_table
- (hwmgr->device,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
- &pll_patameters);
+ (uint32_t *)&pll_patameters);
if (0 == result) {
dividers->usSclk_fcw_frac = le16_to_cpu(pll_patameters.usSclk_fcw_frac);
@@ -407,6 +441,7 @@ int atomctrl_get_dfs_pll_dividers_vi(
uint32_t clock_value,
pp_atomctrl_clock_dividers_vi *dividers)
{
+ struct amdgpu_device *adev = hwmgr->adev;
COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
int result;
@@ -414,10 +449,9 @@ int atomctrl_get_dfs_pll_dividers_vi(
pll_patameters.ulClock.ucPostDiv =
COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK;
- result = cgs_atom_exec_cmd_table
- (hwmgr->device,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
- &pll_patameters);
+ (uint32_t *)&pll_patameters);
if (0 == result) {
dividers->pll_post_divider =
@@ -452,7 +486,7 @@ uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr)
uint32_t clock;
fw_info = (ATOM_FIRMWARE_INFO *)
- cgs_atom_get_data_table(hwmgr->device,
+ smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, FirmwareInfo),
&size, &frev, &crev);
@@ -476,7 +510,7 @@ bool atomctrl_is_voltage_controlled_by_gpio_v3(
uint8_t voltage_mode)
{
ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
- (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device);
+ (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
bool ret;
PP_ASSERT_WITH_CODE((NULL != voltage_info),
@@ -495,7 +529,7 @@ int atomctrl_get_voltage_table_v3(
pp_atomctrl_voltage_table *voltage_table)
{
ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
- (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device);
+ (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;
unsigned int i;
@@ -572,7 +606,7 @@ static ATOM_GPIO_PIN_LUT *get_gpio_lookup_table(void *device)
void *table_address;
table_address = (ATOM_GPIO_PIN_LUT *)
- cgs_atom_get_data_table(device,
+ smu_atom_get_data_table(device,
GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT),
&size, &frev, &crev);
@@ -592,7 +626,7 @@ bool atomctrl_get_pp_assign_pin(
{
bool bRet = false;
ATOM_GPIO_PIN_LUT *gpio_lookup_table =
- get_gpio_lookup_table(hwmgr->device);
+ get_gpio_lookup_table(hwmgr->adev);
PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table),
"Could not find GPIO lookup Table in BIOS.", return false);
@@ -613,7 +647,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
bool debug)
{
ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo;
-
+ struct amdgpu_device *adev = hwmgr->adev;
EFUSE_LINEAR_FUNC_PARAM sRO_fuse;
EFUSE_LINEAR_FUNC_PARAM sCACm_fuse;
EFUSE_LINEAR_FUNC_PARAM sCACb_fuse;
@@ -640,7 +674,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
int result;
getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *)
- cgs_atom_get_data_table(hwmgr->device,
+ smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
NULL, NULL, NULL);
@@ -706,9 +740,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
sOutput_FuseValues.sEfuse = sInput_FuseValues;
- result = cgs_atom_exec_cmd_table(hwmgr->device,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- &sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues);
if (result)
return result;
@@ -727,9 +761,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
sOutput_FuseValues.sEfuse = sInput_FuseValues;
- result = cgs_atom_exec_cmd_table(hwmgr->device,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- &sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues);
if (result)
return result;
@@ -747,9 +781,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength;
sOutput_FuseValues.sEfuse = sInput_FuseValues;
- result = cgs_atom_exec_cmd_table(hwmgr->device,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- &sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues);
if (result)
return result;
@@ -768,9 +802,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
sOutput_FuseValues.sEfuse = sInput_FuseValues;
- result = cgs_atom_exec_cmd_table(hwmgr->device,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- &sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues);
if (result)
return result;
@@ -790,9 +824,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
sOutput_FuseValues.sEfuse = sInput_FuseValues;
- result = cgs_atom_exec_cmd_table(hwmgr->device,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- &sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues);
if (result)
return result;
@@ -811,9 +845,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength;
sOutput_FuseValues.sEfuse = sInput_FuseValues;
- result = cgs_atom_exec_cmd_table(hwmgr->device,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- &sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues);
if (result)
return result;
@@ -842,9 +876,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
sOutput_FuseValues.sEfuse = sInput_FuseValues;
- result = cgs_atom_exec_cmd_table(hwmgr->device,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- &sOutput_FuseValues);
+ (uint32_t *)&sOutput_FuseValues);
if (result)
return result;
@@ -1053,8 +1087,9 @@ int atomctrl_get_voltage_evv_on_sclk(
uint32_t sclk, uint16_t virtual_voltage_Id,
uint16_t *voltage)
{
- int result;
+ struct amdgpu_device *adev = hwmgr->adev;
GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
+ int result;
get_voltage_info_param_space.ucVoltageType =
voltage_type;
@@ -1065,9 +1100,9 @@ int atomctrl_get_voltage_evv_on_sclk(
get_voltage_info_param_space.ulSCLKFreq =
cpu_to_le32(sclk);
- result = cgs_atom_exec_cmd_table(hwmgr->device,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
- &get_voltage_info_param_space);
+ (uint32_t *)&get_voltage_info_param_space);
if (0 != result)
return result;
@@ -1088,9 +1123,10 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
uint16_t virtual_voltage_id,
uint16_t *voltage)
{
+ struct amdgpu_device *adev = hwmgr->adev;
+ GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
int result;
int entry_id;
- GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
/* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) {
@@ -1111,9 +1147,9 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
get_voltage_info_param_space.ulSCLKFreq =
cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk);
- result = cgs_atom_exec_cmd_table(hwmgr->device,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
- &get_voltage_info_param_space);
+ (uint32_t *)&get_voltage_info_param_space);
if (0 != result)
return result;
@@ -1135,7 +1171,7 @@ uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr)
u16 size;
fw_info = (ATOM_COMMON_TABLE_HEADER *)
- cgs_atom_get_data_table(hwmgr->device,
+ smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, FirmwareInfo),
&size, &frev, &crev);
@@ -1167,7 +1203,7 @@ static ATOM_ASIC_INTERNAL_SS_INFO *asic_internal_ss_get_ss_table(void *device)
u16 size;
table = (ATOM_ASIC_INTERNAL_SS_INFO *)
- cgs_atom_get_data_table(device,
+ smu_atom_get_data_table(device,
GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info),
&size, &frev, &crev);
@@ -1188,7 +1224,7 @@ static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr,
memset(ssEntry, 0x00, sizeof(pp_atomctrl_internal_ss_info));
- table = asic_internal_ss_get_ss_table(hwmgr->device);
+ table = asic_internal_ss_get_ss_table(hwmgr->adev);
if (NULL == table)
return -1;
@@ -1260,9 +1296,10 @@ int atomctrl_get_engine_clock_spread_spectrum(
ASIC_INTERNAL_ENGINE_SS, engine_clock, ssInfo);
}
-int atomctrl_read_efuse(void *device, uint16_t start_index,
+int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
uint16_t end_index, uint32_t mask, uint32_t *efuse)
{
+ struct amdgpu_device *adev = hwmgr->adev;
int result;
READ_EFUSE_VALUE_PARAMETER efuse_param;
@@ -1272,9 +1309,9 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
efuse_param.sEfuse.ucBitLength = (uint8_t)
((end_index - start_index) + 1);
- result = cgs_atom_exec_cmd_table(device,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
- &efuse_param);
+ (uint32_t *)&efuse_param);
if (!result)
*efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask;
@@ -1284,6 +1321,7 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
uint8_t level)
{
+ struct amdgpu_device *adev = hwmgr->adev;
DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters;
int result;
@@ -1293,10 +1331,9 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
ADJUST_MC_SETTING_PARAM;
memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level;
- result = cgs_atom_exec_cmd_table
- (hwmgr->device,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
- &memory_clock_parameters);
+ (uint32_t *)&memory_clock_parameters);
return result;
}
@@ -1304,7 +1341,7 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
{
-
+ struct amdgpu_device *adev = hwmgr->adev;
int result;
GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 get_voltage_info_param_space;
@@ -1313,9 +1350,9 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk);
- result = cgs_atom_exec_cmd_table(hwmgr->device,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
- &get_voltage_info_param_space);
+ (uint32_t *)&get_voltage_info_param_space);
if (0 != result)
return result;
@@ -1334,7 +1371,7 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr
u16 size;
ATOM_SMU_INFO_V2_1 *psmu_info =
- (ATOM_SMU_INFO_V2_1 *)cgs_atom_get_data_table(hwmgr->device,
+ (ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, SMU_Info),
&size, &frev, &crev);
@@ -1362,7 +1399,7 @@ int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
return -EINVAL;
profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *)
- cgs_atom_get_data_table(hwmgr->device,
+ smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
NULL, NULL, NULL);
if (!profile)
@@ -1402,7 +1439,7 @@ int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint16_t *load_line)
{
ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info =
- (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device);
+ (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev);
const ATOM_VOLTAGE_OBJECT_V3 *voltage_object;
@@ -1421,16 +1458,17 @@ int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id)
{
- int result;
+ struct amdgpu_device *adev = hwmgr->adev;
SET_VOLTAGE_PS_ALLOCATION allocation;
SET_VOLTAGE_PARAMETERS_V1_3 *voltage_parameters =
(SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage;
+ int result;
voltage_parameters->ucVoltageMode = ATOM_GET_LEAKAGE_ID;
- result = cgs_atom_exec_cmd_table(hwmgr->device,
+ result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
GetIndexIntoMasterTable(COMMAND, SetVoltage),
- voltage_parameters);
+ (uint32_t *)voltage_parameters);
*virtual_voltage_id = voltage_parameters->usVoltageLevel;
@@ -1453,7 +1491,7 @@ int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
- cgs_atom_get_data_table(hwmgr->device,
+ smu_atom_get_data_table(hwmgr->adev,
ix,
NULL, NULL, NULL);
if (!profile)
@@ -1498,3 +1536,31 @@ int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
return 0;
}
+
+void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
+ uint32_t *min_vddc)
+{
+ void *profile;
+
+ profile = smu_atom_get_data_table(hwmgr->adev,
+ GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
+ NULL, NULL, NULL);
+
+ if (profile) {
+ switch (hwmgr->chip_id) {
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc) / 4;
+ *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc) / 4;
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+ *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc) / 100;
+ *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc) / 100;
+ break;
+ default:
+ return;
+ }
+ }
+}
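Every conversion in ppatomctrl.c follows the same two-part pattern: data tables are fetched with smu_atom_get_data_table() on hwmgr->adev, and command tables execute through amdgpu_atom_execute_table() on adev->mode_info.atom_context with the parameter block cast to uint32_t *. Distilled into one fragment (ReadEfuseValue and FirmwareInfo used as examples, matching the hunks above):

	struct amdgpu_device *adev = hwmgr->adev;
	READ_EFUSE_VALUE_PARAMETER efuse_param;
	ATOM_FIRMWARE_INFO *fw_info;
	uint8_t frev, crev;
	uint16_t size;
	int result;

	/* command table: execute via the atom context, not the CGS shim */
	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
			(uint32_t *)&efuse_param);

	/* data table: same master-table index, new accessor */
	fw_info = (ATOM_FIRMWARE_INFO *)smu_atom_get_data_table(hwmgr->adev,
			GetIndexIntoMasterTable(DATA, FirmwareInfo),
			&size, &frev, &crev);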
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index c44a920..3ee54f1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -146,6 +146,14 @@ struct pp_atomctrl_memory_clock_param {
};
typedef struct pp_atomctrl_memory_clock_param pp_atomctrl_memory_clock_param;
+struct pp_atomctrl_memory_clock_param_ai {
+ uint32_t ulClock;
+ uint32_t ulPostDiv;
+ uint16_t ulMclk_fcw_frac;
+ uint16_t ulMclk_fcw_int;
+};
+typedef struct pp_atomctrl_memory_clock_param_ai pp_atomctrl_memory_clock_param_ai;
+
struct pp_atomctrl_internal_ss_info {
uint32_t speed_spectrum_percentage; /* in 1/100 percentage */
uint32_t speed_spectrum_rate; /* in KHz */
@@ -295,10 +303,12 @@ extern bool atomctrl_is_voltage_controlled_by_gpio_v3(struct pp_hwmgr *hwmgr, ui
extern int atomctrl_get_voltage_table_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode, pp_atomctrl_voltage_table *voltage_table);
extern int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param);
+extern int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
+ uint32_t clock_value, pp_atomctrl_memory_clock_param_ai *mpll_param);
extern int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
uint32_t clock_value,
pp_atomctrl_clock_dividers_kong *dividers);
-extern int atomctrl_read_efuse(void *device, uint16_t start_index,
+extern int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
uint16_t end_index, uint32_t mask, uint32_t *efuse);
extern int atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage, uint16_t dpm_level, bool debug);
@@ -320,5 +330,8 @@ extern int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
uint16_t virtual_voltage_id,
uint16_t efuse_voltage_id);
extern int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id);
+
+extern void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
+ uint32_t *min_vddc);
#endif
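One caveat for the new helper: atomctrl_get_voltage_range() leaves its outputs untouched when the profiling table is absent or the chip is not one of the handled ASICs, so callers must pre-initialize both values. A hedged usage sketch:

	uint32_t max_vddc = 0, min_vddc = 0;

	atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);
	if (max_vddc && min_vddc)
		pr_debug("vddc range: %u..%u\n", min_vddc, max_vddc);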
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index ad42caa..c97b0e5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -23,9 +23,9 @@
#include "ppatomfwctrl.h"
#include "atomfirmware.h"
+#include "atom.h"
#include "pp_debug.h"
-
static const union atom_voltage_object_v4 *pp_atomfwctrl_lookup_voltage_type_v4(
const struct atom_voltage_objects_info_v4_1 *voltage_object_info_table,
uint8_t voltage_type, uint8_t voltage_mode)
@@ -38,35 +38,34 @@ static const union atom_voltage_object_v4 *pp_atomfwctrl_lookup_voltage_type_v4(
while (offset < size) {
const union atom_voltage_object_v4 *voltage_object =
- (const union atom_voltage_object_v4 *)(start + offset);
+ (const union atom_voltage_object_v4 *)(start + offset);
- if (voltage_type == voltage_object->gpio_voltage_obj.header.voltage_type &&
- voltage_mode == voltage_object->gpio_voltage_obj.header.voltage_mode)
- return voltage_object;
+ if (voltage_type == voltage_object->gpio_voltage_obj.header.voltage_type &&
+ voltage_mode == voltage_object->gpio_voltage_obj.header.voltage_mode)
+ return voltage_object;
- offset += le16_to_cpu(voltage_object->gpio_voltage_obj.header.object_size);
+ offset += le16_to_cpu(voltage_object->gpio_voltage_obj.header.object_size);
- }
+ }
- return NULL;
+ return NULL;
}
static struct atom_voltage_objects_info_v4_1 *pp_atomfwctrl_get_voltage_info_table(
struct pp_hwmgr *hwmgr)
{
- const void *table_address;
- uint16_t idx;
+ const void *table_address;
+ uint16_t idx;
- idx = GetIndexIntoMasterDataTable(voltageobject_info);
- table_address = cgs_atom_get_data_table(hwmgr->device,
- idx, NULL, NULL, NULL);
+ idx = GetIndexIntoMasterDataTable(voltageobject_info);
+ table_address = smu_atom_get_data_table(hwmgr->adev,
+ idx, NULL, NULL, NULL);
- PP_ASSERT_WITH_CODE(
- table_address,
- "Error retrieving BIOS Table Address!",
- return NULL);
+ PP_ASSERT_WITH_CODE(table_address,
+ "Error retrieving BIOS Table Address!",
+ return NULL);
- return (struct atom_voltage_objects_info_v4_1 *)table_address;
+ return (struct atom_voltage_objects_info_v4_1 *)table_address;
}
/**
@@ -167,7 +166,7 @@ static struct atom_gpio_pin_lut_v2_1 *pp_atomfwctrl_get_gpio_lookup_table(
uint16_t idx;
idx = GetIndexIntoMasterDataTable(gpio_pin_lut);
- table_address = cgs_atom_get_data_table(hwmgr->device,
+ table_address = smu_atom_get_data_table(hwmgr->adev,
idx, NULL, NULL, NULL);
PP_ASSERT_WITH_CODE(table_address,
"Error retrieving BIOS Table Address!",
@@ -248,28 +247,30 @@ int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
uint32_t clock_type, uint32_t clock_value,
struct pp_atomfwctrl_clock_dividers_soc15 *dividers)
{
+ struct amdgpu_device *adev = hwmgr->adev;
struct compute_gpu_clock_input_parameter_v1_8 pll_parameters;
struct compute_gpu_clock_output_parameter_v1_8 *pll_output;
- int result;
uint32_t idx;
pll_parameters.gpuclock_10khz = (uint32_t)clock_value;
pll_parameters.gpu_clock_type = clock_type;
idx = GetIndexIntoMasterCmdTable(computegpuclockparam);
- result = cgs_atom_exec_cmd_table(hwmgr->device, idx, &pll_parameters);
-
- if (!result) {
- pll_output = (struct compute_gpu_clock_output_parameter_v1_8 *)
- &pll_parameters;
- dividers->ulClock = le32_to_cpu(pll_output->gpuclock_10khz);
- dividers->ulDid = le32_to_cpu(pll_output->dfs_did);
- dividers->ulPll_fb_mult = le32_to_cpu(pll_output->pll_fb_mult);
- dividers->ulPll_ss_fbsmult = le32_to_cpu(pll_output->pll_ss_fbsmult);
- dividers->usPll_ss_slew_frac = le16_to_cpu(pll_output->pll_ss_slew_frac);
- dividers->ucPll_ss_enable = pll_output->pll_ss_enable;
- }
- return result;
+
+ if (amdgpu_atom_execute_table(
+ adev->mode_info.atom_context, idx, (uint32_t *)&pll_parameters))
+ return -EINVAL;
+
+ pll_output = (struct compute_gpu_clock_output_parameter_v1_8 *)
+ &pll_parameters;
+ dividers->ulClock = le32_to_cpu(pll_output->gpuclock_10khz);
+ dividers->ulDid = le32_to_cpu(pll_output->dfs_did);
+ dividers->ulPll_fb_mult = le32_to_cpu(pll_output->pll_fb_mult);
+ dividers->ulPll_ss_fbsmult = le32_to_cpu(pll_output->pll_ss_fbsmult);
+ dividers->usPll_ss_slew_frac = le16_to_cpu(pll_output->pll_ss_slew_frac);
+ dividers->ucPll_ss_enable = pll_output->pll_ss_enable;
+
+ return 0;
}
int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
@@ -283,7 +284,7 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
idx = GetIndexIntoMasterDataTable(asic_profiling_info);
profile = (struct atom_asic_profiling_info_v4_1 *)
- cgs_atom_get_data_table(hwmgr->device,
+ smu_atom_get_data_table(hwmgr->adev,
idx, NULL, NULL, NULL);
if (!profile)
@@ -467,7 +468,7 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
idx = GetIndexIntoMasterDataTable(smu_info);
info = (struct atom_smu_info_v3_1 *)
- cgs_atom_get_data_table(hwmgr->device,
+ smu_atom_get_data_table(hwmgr->adev,
idx, NULL, NULL, NULL);
if (!info) {
@@ -487,8 +488,9 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
return 0;
}
-int pp_atomfwctrl__get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKID id, uint32_t *frequency)
+int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKID id, uint32_t *frequency)
{
+ struct amdgpu_device *adev = hwmgr->adev;
struct atom_get_smu_clock_info_parameters_v3_1 parameters;
struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
uint32_t ix;
@@ -497,13 +499,13 @@ int pp_atomfwctrl__get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLK
parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
ix = GetIndexIntoMasterCmdTable(getsmuclockinfo);
- if (!cgs_atom_exec_cmd_table(hwmgr->device, ix, &parameters)) {
- output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&parameters;
- *frequency = output->atom_smu_outputclkfreq.smu_clock_freq_hz / 10000;
- } else {
- pr_info("Error execute_table getsmuclockinfo!");
- return -1;
- }
+
+ if (amdgpu_atom_execute_table(
+ adev->mode_info.atom_context, ix, (uint32_t *)&parameters))
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&parameters;
+ *frequency = output->atom_smu_outputclkfreq.smu_clock_freq_hz / 10000;
return 0;
}
@@ -513,11 +515,10 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
{
struct atom_firmware_info_v3_1 *info = NULL;
uint16_t ix;
- uint32_t frequency = 0;
ix = GetIndexIntoMasterDataTable(firmwareinfo);
info = (struct atom_firmware_info_v3_1 *)
- cgs_atom_get_data_table(hwmgr->device,
+ smu_atom_get_data_table(hwmgr->adev,
ix, NULL, NULL, NULL);
if (!info) {
@@ -536,12 +537,6 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
boot_values->ulSocClk = 0;
boot_values->ulDCEFClk = 0;
- if (!pp_atomfwctrl__get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency))
- boot_values->ulSocClk = frequency;
-
- if (!pp_atomfwctrl__get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency))
- boot_values->ulDCEFClk = frequency;
-
return 0;
}
@@ -553,7 +548,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
ix = GetIndexIntoMasterDataTable(smc_dpm_info);
info = (struct atom_smc_dpm_info_v4_1 *)
- cgs_atom_get_data_table(hwmgr->device,
+ smu_atom_get_data_table(hwmgr->adev,
ix, NULL, NULL, NULL);
if (!info) {
pr_info("Error retrieving BIOS Table Address!");
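The clk-info helper drops its stray double underscore and now fails with -EINVAL instead of -1, and the two call sites removed from pp_atomfwctrl_get_vbios_bootup_values() are expected to move to ASIC code via the export in the header below. A hedged usage sketch mirroring the removed call sites (frequency comes back in 10 kHz units after the /10000 scaling):

	uint32_t frequency = 0;

	if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
			SMU9_SYSPLL0_SOCCLK_ID, &frequency))
		boot_values->ulSocClk = frequency;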
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index 8df1e84f..fe10aa4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -230,6 +230,8 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_bios_boot_up_values *boot_values);
int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_smc_dpm_parameters *param);
+int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr,
+ BIOS_CLKID id, uint32_t *frequency);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index c9eecce..f0d48b1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -141,7 +141,7 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
if (!table_address) {
table_address = (ATOM_Tonga_POWERPLAYTABLE *)
- cgs_atom_get_data_table(hwmgr->device,
+ smu_atom_get_data_table(hwmgr->adev,
index, &size, &frev, &crev);
hwmgr->soft_pp_table = table_address; /*Cache the result in RAM.*/
hwmgr->soft_pp_table_size = size;
@@ -728,6 +728,32 @@ static int get_mm_clock_voltage_table(
return 0;
}
+static int get_gpio_table(struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_gpio_table **pp_tonga_gpio_table,
+ const ATOM_Tonga_GPIO_Table *atom_gpio_table)
+{
+ uint32_t table_size;
+ struct phm_ppt_v1_gpio_table *pp_gpio_table;
+ struct phm_ppt_v1_information *pp_table_information =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ table_size = sizeof(struct phm_ppt_v1_gpio_table);
+ pp_gpio_table = kzalloc(table_size, GFP_KERNEL);
+ if (!pp_gpio_table)
+ return -ENOMEM;
+
+ if (pp_table_information->vdd_dep_on_sclk->count <
+ atom_gpio_table->ucVRHotTriggeredSclkDpmIndex)
+ PP_ASSERT_WITH_CODE(false,
+ "SCLK DPM index for VRHot cannot exceed the total sclk level count!",);
+ else
+ pp_gpio_table->vrhot_triggered_sclk_dpm_index =
+ atom_gpio_table->ucVRHotTriggeredSclkDpmIndex;
+
+ *pp_tonga_gpio_table = pp_gpio_table;
+
+ return 0;
+}
/**
* Private Function used during initialization.
* Initialize clock voltage dependency
@@ -761,11 +787,15 @@ static int init_clock_voltage_dependency(
const PPTable_Generic_SubTable_Header *pcie_table =
(const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
le16_to_cpu(powerplay_table->usPCIETableOffset));
+ const ATOM_Tonga_GPIO_Table *gpio_table =
+ (const ATOM_Tonga_GPIO_Table *)(((unsigned long) powerplay_table) +
+ le16_to_cpu(powerplay_table->usGPIOTableOffset));
pp_table_information->vdd_dep_on_sclk = NULL;
pp_table_information->vdd_dep_on_mclk = NULL;
pp_table_information->mm_dep_table = NULL;
pp_table_information->pcie_table = NULL;
+ pp_table_information->gpio_table = NULL;
if (powerplay_table->usMMDependencyTableOffset != 0)
result = get_mm_clock_voltage_table(hwmgr,
@@ -810,6 +840,10 @@ static int init_clock_voltage_dependency(
result = get_valid_clk(hwmgr, &pp_table_information->valid_sclk_values,
pp_table_information->vdd_dep_on_sclk);
+ if (!result && gpio_table)
+ result = get_gpio_table(hwmgr, &pp_table_information->gpio_table,
+ gpio_table);
+
return result;
}
@@ -1116,6 +1150,9 @@ static int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr)
kfree(pp_table_information->pcie_table);
pp_table_information->pcie_table = NULL;
+ kfree(pp_table_information->gpio_table);
+ pp_table_information->gpio_table = NULL;
+
kfree(hwmgr->pptable);
hwmgr->pptable = NULL;
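The new gpio_table follows the same own-and-free discipline as the other v1.0 sub-tables: kzalloc()'d while parsing, kfree()'d and NULLed during uninitialize. The paired halves, isolated from the hunks above:

	/* parse path (get_gpio_table) */
	pp_gpio_table = kzalloc(sizeof(*pp_gpio_table), GFP_KERNEL);
	if (!pp_gpio_table)
		return -ENOMEM;
	*pp_tonga_gpio_table = pp_gpio_table;

	/* teardown path (pp_tables_v1_0_uninitialize) */
	kfree(pp_table_information->gpio_table);
	pp_table_information->gpio_table = NULL;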
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index 36ca7c4..ce64dfa 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -837,7 +837,7 @@ static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table(
hwmgr->soft_pp_table = &soft_dummy_pp_table[0];
hwmgr->soft_pp_table_size = sizeof(soft_dummy_pp_table);
} else {
- table_addr = cgs_atom_get_data_table(hwmgr->device,
+ table_addr = smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, PowerPlayInfo),
&size, &frev, &crev);
hwmgr->soft_pp_table = table_addr;
@@ -1058,7 +1058,7 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr,
return 0;
/* We assume here that fw_info is unchanged if this call fails.*/
- fw_info = cgs_atom_get_data_table(hwmgr->device,
+ fw_info = smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, FirmwareInfo),
&size, &frev, &crev);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 10253b8..2f69bfa 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -34,7 +34,7 @@
#include "rv_ppsmc.h"
#include "smu10_hwmgr.h"
#include "power_state.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
#define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5
#define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */
@@ -42,6 +42,13 @@
#define SMU10_DISPCLK_BYPASS_THRESHOLD 10000 /* 100Mhz */
#define SMC_RAM_END 0x40000
+#define mmPWR_MISC_CNTL_STATUS 0x0183
+#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L
+
static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
@@ -74,11 +81,15 @@ static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
smu10_data->thermal_auto_throttling_treshold = 0;
smu10_data->is_nb_dpm_enabled = 1;
smu10_data->dpm_flags = 1;
- smu10_data->gfx_off_controled_by_driver = false;
smu10_data->need_min_deep_sleep_dcefclk = true;
smu10_data->num_active_display = 0;
smu10_data->deep_sleep_dcefclk = 0;
+ if (hwmgr->feature_mask & PP_GFXOFF_MASK)
+ smu10_data->gfx_off_controled_by_driver = true;
+ else
+ smu10_data->gfx_off_controled_by_driver = false;
+
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
@@ -161,7 +172,7 @@ static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
struct PP_Clocks clocks = {0};
struct pp_display_clock_request clock_req;
- clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
+ clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
clock_req.clock_type = amd_pp_dcf_clock;
clock_req.clock_freq_in_khz = clocks.dcefClock * 10;
@@ -206,12 +217,18 @@ static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input
static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ struct amdgpu_device *adev = hwmgr->adev;
smu10_data->vcn_power_gated = true;
smu10_data->isp_tileA_power_gated = true;
smu10_data->isp_tileB_power_gated = true;
- return 0;
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetGfxCGPG,
+ true);
+ else
+ return 0;
}
@@ -237,13 +254,31 @@ static int smu10_power_off_asic(struct pp_hwmgr *hwmgr)
return smu10_reset_cc6_data(hwmgr);
}
+static bool smu10_is_gfx_on(struct pp_hwmgr *hwmgr)
+{
+ uint32_t reg;
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ reg = RREG32_SOC15(PWR, 0, mmPWR_MISC_CNTL_STATUS);
+ if ((reg & PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK) ==
+ (0x2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT))
+ return true;
+
+ return false;
+}
+
static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
- if (smu10_data->gfx_off_controled_by_driver)
+ if (smu10_data->gfx_off_controled_by_driver) {
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);
+ /* confirm gfx is back to "on" state */
+ while (!smu10_is_gfx_on(hwmgr))
+ msleep(1);
+ }
+
return 0;
}
@@ -267,6 +302,14 @@ static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
return smu10_enable_gfx_off(hwmgr);
}
+static int smu10_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
+{
+ if (enable)
+ return smu10_enable_gfx_off(hwmgr);
+ else
+ return smu10_disable_gfx_off(hwmgr);
+}
+
static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *prequest_ps,
const struct pp_power_state *pcurrent_ps)
@@ -340,7 +383,7 @@ static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
{
- int result;
+ uint32_t result;
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
DpmClocks_t *table = &(smu10_data->clock_table);
@@ -386,11 +429,11 @@ static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
result = smum_get_argument(hwmgr);
- smu10_data->gfx_min_freq_limit = result * 100;
+ smu10_data->gfx_min_freq_limit = result / 10 * 1000;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency);
result = smum_get_argument(hwmgr);
- smu10_data->gfx_max_freq_limit = result * 100;
+ smu10_data->gfx_max_freq_limit = result / 10 * 1000;
return 0;
}
@@ -436,8 +479,8 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
- hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK;
- hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK;
+ hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100;
+ hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100;
return result;
}
@@ -472,6 +515,8 @@ static int smu10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
+ struct smu10_hwmgr *data = hwmgr->backend;
+
if (hwmgr->smu_version < 0x1E3700) {
pr_info("smu firmware version too old, can not set dpm level\n");
return 0;
@@ -482,7 +527,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- SMU10_UMD_PSTATE_PEAK_GFXCLK);
+ data->gfx_max_freq_limit/100);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
SMU10_UMD_PSTATE_PEAK_FCLK);
@@ -495,7 +540,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- SMU10_UMD_PSTATE_PEAK_GFXCLK);
+ data->gfx_max_freq_limit/100);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
SMU10_UMD_PSTATE_PEAK_FCLK);
@@ -509,10 +554,10 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- SMU10_UMD_PSTATE_MIN_GFXCLK);
+ data->gfx_min_freq_limit/100);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- SMU10_UMD_PSTATE_MIN_GFXCLK);
+ data->gfx_min_freq_limit/100);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
smum_send_msg_to_smc_with_parameter(hwmgr,
@@ -552,7 +597,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
case AMD_DPM_FORCED_LEVEL_AUTO:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- SMU10_UMD_PSTATE_MIN_GFXCLK);
+ data->gfx_min_freq_limit/100);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
SMU10_UMD_PSTATE_MIN_FCLK);
@@ -565,7 +610,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- SMU10_UMD_PSTATE_PEAK_GFXCLK);
+ data->gfx_max_freq_limit/100);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
SMU10_UMD_PSTATE_PEAK_FCLK);
@@ -579,10 +624,10 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
case AMD_DPM_FORCED_LEVEL_LOW:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- SMU10_UMD_PSTATE_MIN_GFXCLK);
+ data->gfx_min_freq_limit/100);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- SMU10_UMD_PSTATE_MIN_GFXCLK);
+ data->gfx_min_freq_limit/100);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
SMU10_UMD_PSTATE_MIN_FCLK);
@@ -699,6 +744,16 @@ static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr)
static int smu10_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
{
+ struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+ if (separation_time != data->separation_time ||
+ cc6_disable != data->cc6_disable ||
+ pstate_disable != data->pstate_disable) {
+ data->separation_time = separation_time;
+ data->cc6_disable = cc6_disable;
+ data->pstate_disable = pstate_disable;
+ data->cc6_setting_changed = true;
+ }
return 0;
}
@@ -711,6 +766,51 @@ static int smu10_get_dal_power_level(struct pp_hwmgr *hwmgr,
static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
+ struct smu10_hwmgr *data = hwmgr->backend;
+ struct smu10_voltage_dependency_table *mclk_table =
+ data->clock_vol_info.vdd_dep_on_fclk;
+ uint32_t low, high;
+
+ low = mask ? (ffs(mask) - 1) : 0;
+ high = mask ? (fls(mask) - 1) : 0;
+
+ switch (type) {
+ case PP_SCLK:
+ if (low > 2 || high > 2) {
+ pr_info("Currently sclk only supports 3 levels on RV\n");
+ return -EINVAL;
+ }
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ low == 2 ? data->gfx_max_freq_limit/100 :
+ low == 1 ? SMU10_UMD_PSTATE_GFXCLK :
+ data->gfx_min_freq_limit/100);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ high == 0 ? data->gfx_min_freq_limit/100 :
+ high == 1 ? SMU10_UMD_PSTATE_GFXCLK :
+ data->gfx_max_freq_limit/100);
+ break;
+
+ case PP_MCLK:
+ if (low > mclk_table->count - 1 || high > mclk_table->count - 1)
+ return -EINVAL;
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ mclk_table->entries[low].clk/100);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxFclkByFreq,
+ mclk_table->entries[high].clk/100);
+ break;
+
+ case PP_PCIE:
+ default:
+ break;
+ }
return 0;
}
@@ -720,21 +820,30 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
struct smu10_voltage_dependency_table *mclk_table =
data->clock_vol_info.vdd_dep_on_fclk;
- int i, now, size = 0;
+ uint32_t i, now, size = 0;
switch (type) {
case PP_SCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
now = smum_get_argument(hwmgr);
+ /* driver only knows min/max gfx_clk; add level 1 for all other gfx clks */
+ if (now == data->gfx_max_freq_limit/100)
+ i = 2;
+ else if (now == data->gfx_min_freq_limit/100)
+ i = 0;
+ else
+ i = 1;
+
size += sprintf(buf + size, "0: %uMhz %s\n",
- data->gfx_min_freq_limit / 100,
- ((data->gfx_min_freq_limit / 100)
- == now) ? "*" : "");
+ data->gfx_min_freq_limit/100,
+ i == 0 ? "*" : "");
size += sprintf(buf + size, "1: %uMhz %s\n",
- data->gfx_max_freq_limit / 100,
- ((data->gfx_max_freq_limit / 100)
- == now) ? "*" : "");
+ i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
+ i == 1 ? "*" : "");
+ size += sprintf(buf + size, "2: %uMhz %s\n",
+ data->gfx_max_freq_limit/100,
+ i == 2 ? "*" : "");
break;
case PP_MCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
@@ -947,9 +1056,8 @@ static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simpl
static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
- uint32_t reg_offset = soc15_get_register_offset(THM_HWID, 0,
- mmTHM_TCON_CUR_TMP_BASE_IDX, mmTHM_TCON_CUR_TMP);
- uint32_t reg_value = cgs_read_register(hwmgr->device, reg_offset);
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t reg_value = RREG32_SOC15(THM, 0, mmTHM_TCON_CUR_TMP);
int cur_temp =
(reg_value & THM_TCON_CUR_TMP__CUR_TEMP_MASK) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT;
@@ -993,6 +1101,25 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
return ret;
}
+static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
+ struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+{
+ struct smu10_hwmgr *data = hwmgr->backend;
+ Watermarks_t *table = &(data->water_marks_table);
+ int result = 0;
+
+ smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
+ smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false);
+ data->water_marks_exist = true;
+ return result;
+}
+
+static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
+{
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister);
+}
+
static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
{
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
@@ -1022,6 +1149,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.get_current_shallow_sleep_clocks = smu10_get_current_shallow_sleep_clocks,
.get_clock_by_type_with_latency = smu10_get_clock_by_type_with_latency,
.get_clock_by_type_with_voltage = smu10_get_clock_by_type_with_voltage,
+ .set_watermarks_for_clocks_ranges = smu10_set_watermarks_for_clocks_ranges,
.get_max_high_clocks = smu10_get_max_high_clocks,
.read_sensor = smu10_read_sensor,
.set_active_display_count = smu10_set_active_display_count,
@@ -1032,6 +1160,8 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.power_state_set = smu10_set_power_state_tasks,
.dynamic_state_management_disable = smu10_disable_dpm_tasks,
.set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu,
+ .smus_notify_pwe = smu10_smus_notify_pwe,
+ .gfx_off_control = smu10_gfx_off_control,
};
int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
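smu10_force_clock_level() reduces the requested bitmask to a [low, high] level pair with ffs()/fls(): the lowest set bit becomes the hard minimum and the highest the soft maximum. A standalone decode sketch (mask value illustrative):

	uint32_t mask = 0x5;				/* levels 0 and 2 requested */
	uint32_t low  = mask ? (ffs(mask) - 1) : 0;	/* -> 0, hard min */
	uint32_t high = mask ? (fls(mask) - 1) : 0;	/* -> 2, soft max */

Only three sclk levels exist on Raven (min, SMU10_UMD_PSTATE_GFXCLK, max), which is why indices above 2 are rejected for PP_SCLK.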
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
index 175c3a5..1fb296a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
@@ -290,6 +290,7 @@ struct smu10_hwmgr {
bool vcn_dpg_mode;
bool gfx_off_controled_by_driver;
+ bool water_marks_exist;
Watermarks_t water_marks_table;
struct smu10_clock_voltage_information clock_vol_info;
DpmClocks_t clock_table;
@@ -310,11 +311,9 @@ int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);
#define SMU10_UMD_PSTATE_FCLK 933
#define SMU10_UMD_PSTATE_VCE 0x03C00320
-#define SMU10_UMD_PSTATE_PEAK_GFXCLK 1100
#define SMU10_UMD_PSTATE_PEAK_SOCCLK 757
#define SMU10_UMD_PSTATE_PEAK_FCLK 1200
-#define SMU10_UMD_PSTATE_MIN_GFXCLK 200
#define SMU10_UMD_PSTATE_MIN_FCLK 400
#define SMU10_UMD_PSTATE_MIN_SOCCLK 200
#define SMU10_UMD_PSTATE_MIN_VCE 0x0190012C
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index f4cbaee..6d72a56 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -147,20 +147,20 @@ void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
data->uvd_power_gated = bgate;
if (bgate) {
- cgs_set_powergating_state(hwmgr->device,
+ amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
- cgs_set_clockgating_state(hwmgr->device,
+ amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
smu7_update_uvd_dpm(hwmgr, true);
smu7_powerdown_uvd(hwmgr);
} else {
smu7_powerup_uvd(hwmgr);
- cgs_set_clockgating_state(hwmgr->device,
+ amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
- cgs_set_powergating_state(hwmgr->device,
+ amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_UNGATE);
smu7_update_uvd_dpm(hwmgr, false);
@@ -175,20 +175,20 @@ void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
data->vce_power_gated = bgate;
if (bgate) {
- cgs_set_powergating_state(hwmgr->device,
+ amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
- cgs_set_clockgating_state(hwmgr->device,
+ amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
smu7_update_vce_dpm(hwmgr, true);
smu7_powerdown_vce(hwmgr);
} else {
smu7_powerup_vce(hwmgr);
- cgs_set_clockgating_state(hwmgr->device,
+ amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
- cgs_set_powergating_state(hwmgr->device,
+ amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_UNGATE);
smu7_update_vce_dpm(hwmgr, false);
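Both hunks replace the cgs_* indirection with direct amdgpu IP-block calls while keeping the ordering intact: power gate before clock gate on the way down, clock ungate before power ungate on the way up, with the DPM updates and SMU power-down/up calls bracketing them. A condensed sketch of the shared pattern (the helper itself is illustrative, not part of the patch):

	/* Illustrative helper capturing the UVD/VCE gating order above. */
	static void smu7_gate_ip_block(struct amdgpu_device *adev,
				       enum amd_ip_block_type block, bool gate)
	{
		if (gate) {
			amdgpu_device_ip_set_powergating_state(adev, block,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, block,
							       AMD_CG_STATE_GATE);
		} else {
			amdgpu_device_ip_set_clockgating_state(adev, block,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, block,
							       AMD_PG_STATE_UNGATE);
		}
	}
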
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 26fbeaf..8eb3f51 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -61,10 +61,6 @@
#define SMC_CG_IND_START 0xc0030000
#define SMC_CG_IND_END 0xc0040000
-#define VOLTAGE_SCALE 4
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-
#define MEM_FREQ_LOW_LATENCY 25000
#define MEM_FREQ_HIGH_LATENCY 80000
@@ -79,14 +75,23 @@
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
-static const struct profile_mode_setting smu7_profiling[5] =
+static const struct profile_mode_setting smu7_profiling[6] =
{{1, 0, 100, 30, 1, 0, 100, 10},
{1, 10, 0, 30, 0, 0, 0, 0},
{0, 0, 0, 0, 1, 10, 16, 31},
{1, 0, 11, 50, 1, 0, 100, 10},
{1, 0, 5, 30, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0, 0, 0},
};
+#define PPSMC_MSG_SetVBITimeout_VEGAM ((uint16_t) 0x310)
+
+#define ixPWR_SVI2_PLANE1_LOAD 0xC0200280
+#define PWR_SVI2_PLANE1_LOAD__PSI1_MASK 0x00000020L
+#define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK 0x00000040L
+#define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT 0x00000005
+#define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT 0x00000006
+
/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
DPM_EVENT_SRC_ANALOG = 0,
@@ -168,6 +173,13 @@ static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
*/
static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
+ if (hwmgr->chip_id == CHIP_VEGAM) {
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
+ }
+
if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);
@@ -797,32 +809,6 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
return 0;
}
-static int smu7_get_voltage_dependency_table(
- const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
-{
- uint8_t i = 0;
- PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count),
- "Voltage Lookup Table empty",
- return -EINVAL);
-
- dep_table->count = allowed_dep_table->count;
- for (i=0; i<dep_table->count; i++) {
- dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
- dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
- dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
- dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc;
- dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx;
- dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci;
- dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd;
- dep_table->entries[i].phases = allowed_dep_table->entries[i].phases;
- dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable;
- dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset;
- }
-
- return 0;
-}
-
static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -850,7 +836,7 @@ static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
entries[i].vddc = dep_sclk_table->entries[i].vddc;
}
- smu7_get_voltage_dependency_table(dep_sclk_table,
+ smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));
odn_table->odn_memory_clock_dpm_levels.num_of_pl =
@@ -862,12 +848,39 @@ static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
entries[i].vddc = dep_mclk_table->entries[i].vddc;
}
- smu7_get_voltage_dependency_table(dep_mclk_table,
+ smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));
return 0;
}
+static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t min_vddc, max_vddc;
+
+ if (!table_info)
+ return;
+
+ dep_sclk_table = table_info->vdd_dep_on_sclk;
+
+ atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);
+
+ if (min_vddc == 0 || min_vddc > 2000
+ || min_vddc > dep_sclk_table->entries[0].vddc)
+ min_vddc = dep_sclk_table->entries[0].vddc;
+
+ if (max_vddc == 0 || max_vddc > 2000
+ || max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
+ max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;
+
+ data->odn_dpm_table.min_vddc = min_vddc;
+ data->odn_dpm_table.max_vddc = max_vddc;
+}
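
The clamping ensures the OD window always covers the sclk dependency table even when ATOM reports nothing usable. Worked cases, assuming a table spanning 800..1150 mV:

	/* VBIOS (min, max) -> (min_vddc, max_vddc):
	 *   (   0,    0) -> (800, 1150)   invalid readings fall back to the table
	 *   ( 850, 1100) -> (800, 1150)   a window narrower than the table is widened
	 *   ( 750, 1200) -> (750, 1200)   a window wider than the table is kept
	 */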
+
static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -886,8 +899,10 @@ static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
sizeof(struct smu7_dpm_table));
/* initialize ODN table */
- if (hwmgr->od_enabled)
+ if (hwmgr->od_enabled) {
+ smu7_setup_voltage_range_from_vbios(hwmgr);
smu7_odn_initial_default_setting(hwmgr);
+ }
return 0;
}
@@ -965,6 +980,22 @@ static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
return 0;
}
+static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t soft_register_value = 0;
+ uint32_t handshake_disables_offset = data->soft_regs_start
+ + smum_get_offsetof(hwmgr,
+ SMU_SoftRegisters, HandshakeDisables);
+
+ soft_register_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, handshake_disables_offset);
+ soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ handshake_disables_offset, soft_register_value);
+ return 0;
+}
+
static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -988,6 +1019,9 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
/* enable SCLK dpm */
if (!data->sclk_dpm_key_disabled)
+ if (hwmgr->chip_id == CHIP_VEGAM)
+ smu7_disable_sclk_vce_handshake(hwmgr);
+
PP_ASSERT_WITH_CODE(
(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
"Failed to enable SCLK DPM during DPM Start Function!",
@@ -997,13 +1031,15 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if (0 == data->mclk_dpm_key_disabled) {
if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
smu7_disable_handshake_uvd(hwmgr);
+
PP_ASSERT_WITH_CODE(
(0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_MCLKDPM_Enable)),
"Failed to enable MCLK DPM during DPM Start Function!",
return -EINVAL);
- PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
+ if (hwmgr->chip_family != CHIP_VEGAM)
+ PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
@@ -1019,8 +1055,13 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
udelay(10);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
+ if (hwmgr->chip_id == CHIP_VEGAM) {
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
+ } else {
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
+ }
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
}
}
@@ -1229,7 +1270,7 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
tmp_result = smu7_construct_voltage_tables(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to contruct voltage tables!",
+ "Failed to construct voltage tables!",
result = tmp_result);
}
smum_initialize_mc_reg_table(hwmgr);
@@ -1261,10 +1302,12 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to process firmware header!", result = tmp_result);
- tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize switch from ArbF0 to F1!",
- result = tmp_result);
+ if (hwmgr->chip_id != CHIP_VEGAM) {
+ tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to initialize switch from ArbF0 to F1!",
+ result = tmp_result);
+ }
result = smu7_setup_default_dpm_tables(hwmgr);
PP_ASSERT_WITH_CODE(0 == result,
@@ -2754,6 +2797,9 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
case CHIP_POLARIS12:
switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
break;
+ case CHIP_VEGAM:
+ switch_limit_us = 30;
+ break;
default:
switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
break;
@@ -2777,8 +2823,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct PP_Clocks minimum_clocks = {0};
bool disable_mclk_switching;
bool disable_mclk_switching_for_frame_lock;
- struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info = {0};
const struct phm_clock_and_voltage_limits *max_limits;
uint32_t i;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -2787,7 +2831,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
int32_t count;
int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
- info.mode_info = &mode_info;
data->battery_state = (PP_StateUILabel_Battery ==
request_ps->classification.ui_label);
@@ -2809,10 +2852,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
}
}
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
- minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
+ minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
+ minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_StablePState)) {
@@ -2843,12 +2884,12 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
- if (info.display_count == 0)
+ if (hwmgr->display_config->num_display == 0)
disable_mclk_switching = false;
else
- disable_mclk_switching = ((1 < info.display_count) ||
+ disable_mclk_switching = ((1 < hwmgr->display_config->num_display) ||
disable_mclk_switching_for_frame_lock ||
- smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us));
+ smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
sclk = smu7_ps->performance_levels[0].engine_clock;
mclk = smu7_ps->performance_levels[0].memory_clock;
@@ -2957,8 +2998,7 @@ static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
/* First retrieve the Boot clocks and VDDC from the firmware info table.
* We assume here that fw_info is unchanged if this call fails.
*/
- fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
- hwmgr->device, index,
+ fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index,
&size, &frev, &crev);
if (!fw_info)
/* During a test, there is no firmware info table. */
@@ -3366,34 +3406,35 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
return 0;
}
-static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr,
- struct pp_gpu_power *query)
+static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
{
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_PmStatusLogStart),
- "Failed to start pm status log!",
- return -1);
+ int i;
+ u32 tmp = 0;
- /* Sampling period from 50ms to 4sec */
- msleep_interruptible(200);
+ if (!query)
+ return -EINVAL;
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_PmStatusLogSample),
- "Failed to sample pm status log!",
- return -1);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
+ tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ *query = tmp;
- query->vddc_power = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC,
- ixSMU_PM_STATUS_40);
- query->vddci_power = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC,
- ixSMU_PM_STATUS_49);
- query->max_gpu_power = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC,
- ixSMU_PM_STATUS_94);
- query->average_gpu_power = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC,
- ixSMU_PM_STATUS_95);
+ if (tmp != 0)
+ return 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_PM_STATUS_94, 0);
+
+ for (i = 0; i < 10; i++) {
+ mdelay(1);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
+ tmp = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC,
+ ixSMU_PM_STATUS_94);
+ if (tmp != 0)
+ break;
+ }
+ *query = tmp;
return 0;
}
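
The rewrite queries the SMC directly via PPSMC_MSG_GetCurrPkgPwr and only falls back to the legacy start/sample log when that returns zero, polling ixSMU_PM_STATUS_94 for at most ~10 ms instead of sleeping 200 ms. The fallback loop generalizes to a small helper; a sketch, not part of the patch:

	/* Hypothetical poll helper mirroring the fallback loop above. */
	static u32 smu7_poll_pm_status(struct pp_hwmgr *hwmgr, u32 ix_reg, int tries)
	{
		u32 val = 0;

		while (tries--) {
			mdelay(1);
			smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
			val = cgs_read_ind_register(hwmgr->device,
						    CGS_IND_REG__SMC, ix_reg);
			if (val)
				break;
		}
		return val;
	}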
@@ -3446,10 +3487,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*size = 4;
return 0;
case AMDGPU_PP_SENSOR_GPU_POWER:
- if (*size < sizeof(struct pp_gpu_power))
- return -EINVAL;
- *size = sizeof(struct pp_gpu_power);
- return smu7_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
+ return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
case AMDGPU_PP_SENSOR_VDDGFX:
if ((data->vr_config & 0xff) == 0x2)
val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
@@ -3480,7 +3518,6 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
[smu7_ps->performance_level_count - 1].memory_clock;
struct PP_Clocks min_clocks = {0};
uint32_t i;
- struct cgs_display_info info = {0};
for (i = 0; i < sclk_table->count; i++) {
if (sclk == sclk_table->dpm_levels[i].value)
@@ -3507,9 +3544,8 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
if (i >= mclk_table->count)
data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
- cgs_get_active_displays_info(hwmgr->device, &info);
- if (data->display_timing.num_existing_displays != info.display_count)
+ if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
return 0;
@@ -3812,9 +3848,14 @@ static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK)
- smum_send_msg_to_smc_with_parameter(hwmgr,
- (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
+ if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
+ if (hwmgr->chip_id == CHIP_VEGAM)
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2);
+ else
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
+ }
return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
}
@@ -3908,15 +3949,8 @@ smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
static int
smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
{
- uint32_t num_active_displays = 0;
- struct cgs_display_info info = {0};
-
- info.mode_info = NULL;
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- num_active_displays = info.display_count;
-
- if (num_active_displays > 1 && hwmgr->display_config.multi_monitor_in_sync != true)
+ if (hwmgr->display_config->num_display > 1 &&
+ !hwmgr->display_config->multi_monitor_in_sync)
smu7_notify_smc_display_change(hwmgr, false);
return 0;
@@ -3931,33 +3965,24 @@ smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t num_active_displays = 0;
uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
uint32_t display_gap2;
uint32_t pre_vbi_time_in_us;
uint32_t frame_time_in_us;
- uint32_t ref_clock;
- uint32_t refresh_rate = 0;
- struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info = {0};
+ uint32_t ref_clock, refresh_rate;
- info.mode_info = &mode_info;
- cgs_get_active_displays_info(hwmgr->device, &info);
- num_active_displays = info.display_count;
-
- display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
+ display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
-
- refresh_rate = mode_info.refresh_rate;
+ refresh_rate = hwmgr->display_config->vrefresh;
if (0 == refresh_rate)
refresh_rate = 60;
frame_time_in_us = 1000000 / refresh_rate;
- pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+ pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time;
data->frame_time_x2 = frame_time_in_us * 2 / 100;
@@ -4037,17 +4062,14 @@ smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
bool is_update_required = false;
- struct cgs_display_info info = {0, 0, NULL};
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (data->display_timing.num_existing_displays != info.display_count)
+ if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
is_update_required = true;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
- if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr &&
+ if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr &&
(data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
- hwmgr->display_config.min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
+ hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
is_update_required = true;
}
return is_update_required;
@@ -4102,7 +4124,7 @@ static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
return 0;
}
-static int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr)
+static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -4181,13 +4203,9 @@ static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t temp;
-
- temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
+ struct amdgpu_device *adev = hwmgr->adev;
- data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
- ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
- MC_SEQ_MISC0_GDDR5_SHIFT));
+ data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5);
return 0;
}
@@ -4235,7 +4253,7 @@ static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
- smu7_upload_mc_firmware(hwmgr);
+ smu7_check_mc_firmware(hwmgr);
tmp_result = smu7_read_clock_registers(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
@@ -4370,22 +4388,36 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_SCLK:
if (hwmgr->od_enabled) {
- size = sprintf(buf, "%s: \n", "OD_SCLK");
+ size = sprintf(buf, "%s:\n", "OD_SCLK");
for (i = 0; i < odn_sclk_table->num_of_pl; i++)
- size += sprintf(buf + size, "%d: %10uMhz %10u mV\n",
- i, odn_sclk_table->entries[i].clock / 100,
+ size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
+ i, odn_sclk_table->entries[i].clock/100,
odn_sclk_table->entries[i].vddc);
}
break;
case OD_MCLK:
if (hwmgr->od_enabled) {
- size = sprintf(buf, "%s: \n", "OD_MCLK");
+ size = sprintf(buf, "%s:\n", "OD_MCLK");
for (i = 0; i < odn_mclk_table->num_of_pl; i++)
- size += sprintf(buf + size, "%d: %10uMhz %10u mV\n",
- i, odn_mclk_table->entries[i].clock / 100,
+ size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
+ i, odn_mclk_table->entries[i].clock/100,
odn_mclk_table->entries[i].vddc);
}
break;
+ case OD_RANGE:
+ if (hwmgr->od_enabled) {
+ size = sprintf(buf, "%s:\n", "OD_RANGE");
+ size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+ data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
+ size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
+ data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
+ size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
+ data->odn_dpm_table.min_vddc,
+ data->odn_dpm_table.max_vddc);
+ }
+ break;
default:
break;
}
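
With the new OD_RANGE case, pp_od_clk_voltage reports the tunable window alongside the OD_SCLK/OD_MCLK levels. The emitted text would look roughly like this (values illustrative, spacing per the %7u/%10u formats above):

	OD_RANGE:
	SCLK:     300MHz       1411MHz
	MCLK:     300MHz       2250MHz
	VDDC:     800mV        1150mV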
@@ -4669,36 +4701,27 @@ static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t min_vddc;
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
-
- if (table_info == NULL)
- return false;
-
- dep_sclk_table = table_info->vdd_dep_on_sclk;
- min_vddc = dep_sclk_table->entries[0].vddc;
-
- if (voltage < min_vddc || voltage > 2000) {
- pr_info("OD voltage is out of range [%d - 2000] mV\n", min_vddc);
+ if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) {
+ pr_info("OD voltage is out of range [%d - %d] mV\n",
+ data->odn_dpm_table.min_vddc,
+ data->odn_dpm_table.max_vddc);
return false;
}
if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
- if (data->vbios_boot_state.sclk_bootup_value > clk ||
+ if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk ||
hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
pr_info("OD engine clock is out of range [%d - %d] MHz\n",
- data->vbios_boot_state.sclk_bootup_value,
- hwmgr->platform_descriptor.overdriveLimit.engineClock / 100);
+ data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
return false;
}
} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
- if (data->vbios_boot_state.mclk_bootup_value > clk ||
+ if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk ||
hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
pr_info("OD memory clock is out of range [%d - %d] MHz\n",
- data->vbios_boot_state.mclk_bootup_value/100,
- hwmgr->platform_descriptor.overdriveLimit.memoryClock / 100);
+ data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
return false;
}
} else {
@@ -4747,10 +4770,6 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
return;
}
}
- if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
- data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
- }
dep_table = table_info->vdd_dep_on_sclk;
odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
@@ -4760,9 +4779,9 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
return;
}
}
- if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+ if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
}
}
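
The flag handling also changes shape here: previously each loop promoted OD_UPDATE_VDDC to its own clock flag, and only when the edited voltage matched no dependency entry (i == dep_table->count); now any pending voltage edit rebuilds both clock tables:

	/* before: sclk loop: OD_UPDATE_VDDC -> OD_UPDATE_SCLK   (if i == count)
	 *         mclk loop: OD_UPDATE_VDDC -> OD_UPDATE_MCLK   (if i == count)
	 * after:  OD_UPDATE_VDDC -> OD_UPDATE_SCLK | OD_UPDATE_MCLK, unconditionally
	 */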
@@ -4864,6 +4883,17 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting);
for (i = 0; i < len; i++) {
+ if (i == hwmgr->power_profile_mode) {
+ size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
+ i, profile_name[i], "*",
+ data->current_profile_setting.sclk_up_hyst,
+ data->current_profile_setting.sclk_down_hyst,
+ data->current_profile_setting.sclk_activity,
+ data->current_profile_setting.mclk_up_hyst,
+ data->current_profile_setting.mclk_down_hyst,
+ data->current_profile_setting.mclk_activity);
+ continue;
+ }
if (smu7_profiling[i].bupdate_sclk)
size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
@@ -4883,24 +4913,6 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
"-", "-", "-");
}
- size += sprintf(buf + size, "%3d %16s: %8d %16d %16d %16d %16d %16d\n",
- i, profile_name[i],
- data->custom_profile_setting.sclk_up_hyst,
- data->custom_profile_setting.sclk_down_hyst,
- data->custom_profile_setting.sclk_activity,
- data->custom_profile_setting.mclk_up_hyst,
- data->custom_profile_setting.mclk_down_hyst,
- data->custom_profile_setting.mclk_activity);
-
- size += sprintf(buf + size, "%3s %16s: %8d %16d %16d %16d %16d %16d\n",
- "*", "CURRENT",
- data->current_profile_setting.sclk_up_hyst,
- data->current_profile_setting.sclk_down_hyst,
- data->current_profile_setting.sclk_activity,
- data->current_profile_setting.mclk_up_hyst,
- data->current_profile_setting.mclk_down_hyst,
- data->current_profile_setting.mclk_activity);
-
return size;
}
@@ -4939,16 +4951,16 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint
if (size < 8)
return -EINVAL;
- data->custom_profile_setting.bupdate_sclk = input[0];
- data->custom_profile_setting.sclk_up_hyst = input[1];
- data->custom_profile_setting.sclk_down_hyst = input[2];
- data->custom_profile_setting.sclk_activity = input[3];
- data->custom_profile_setting.bupdate_mclk = input[4];
- data->custom_profile_setting.mclk_up_hyst = input[5];
- data->custom_profile_setting.mclk_down_hyst = input[6];
- data->custom_profile_setting.mclk_activity = input[7];
- if (!smum_update_dpm_settings(hwmgr, &data->custom_profile_setting)) {
- memcpy(&data->current_profile_setting, &data->custom_profile_setting, sizeof(struct profile_mode_setting));
+ tmp.bupdate_sclk = input[0];
+ tmp.sclk_up_hyst = input[1];
+ tmp.sclk_down_hyst = input[2];
+ tmp.sclk_activity = input[3];
+ tmp.bupdate_mclk = input[4];
+ tmp.mclk_up_hyst = input[5];
+ tmp.mclk_down_hyst = input[6];
+ tmp.mclk_activity = input[7];
+ if (!smum_update_dpm_settings(hwmgr, &tmp)) {
+ memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
hwmgr->power_profile_mode = mode;
}
break;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
index f40179c..c91e75d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
@@ -184,6 +184,8 @@ struct smu7_odn_dpm_table {
struct smu7_odn_clock_voltage_dependency_table vdd_dependency_on_sclk;
struct smu7_odn_clock_voltage_dependency_table vdd_dependency_on_mclk;
uint32_t odn_mclk_min_limit;
+ uint32_t min_vddc;
+ uint32_t max_vddc;
};
struct profile_mode_setting {
@@ -325,7 +327,6 @@ struct smu7_hwmgr {
uint16_t mem_latency_high;
uint16_t mem_latency_low;
uint32_t vr_config;
- struct profile_mode_setting custom_profile_setting;
struct profile_mode_setting current_profile_setting;
};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 03bc745..99b29ff 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -623,6 +623,190 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11_Kicker[] =
{ 0xFFFFFFFF } /* End of list */
};
+static const struct gpu_pt_config_reg GCCACConfig_VegaM[] =
+{
+// ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+// Offset Mask Shift Value Type
+// ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ // DIDT_SQ
+ //
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, GPU_CONFIGREG_GC_CAC_IND },
+
+ // DIDT_TD
+ //
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, GPU_CONFIGREG_GC_CAC_IND },
+
+ // DIDT_TCP
+ //
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, GPU_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, GPU_CONFIGREG_GC_CAC_IND },
+
+ { 0xFFFFFFFF } // End of list
+};
+
+static const struct gpu_pt_config_reg DIDTConfig_VegaM[] =
+{
+// ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+// Offset Mask Shift Value Type
+// ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ // DIDT_SQ
+ //
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ // DIDT_TD
+ //
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ // DIDT_TCP
+ //
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { 0xFFFFFFFF } // End of list
+};
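
Both VegaM tables are consumed by smu7_program_pt_config_registers(), which walks entries until the 0xFFFFFFFF terminator. For a GPU_CONFIGREG_DIDT_IND entry the effect is a masked read-modify-write of the DIDT indirect register space; a sketch, assuming that helper's semantics and the gpu_pt_config_reg field names:

	/* Sketch of how one DIDT_IND table entry is applied. */
	u32 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT,
					 entry->offset);
	data &= ~entry->mask;
	data |= (entry->value << entry->shift) & entry->mask;
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT,
			       entry->offset, data);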
static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
{
uint32_t en = enable ? 1 : 0;
@@ -740,8 +924,8 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
PP_CAP(PHM_PlatformCaps_TDRamping) ||
PP_CAP(PHM_PlatformCaps_TCPRamping)) {
- cgs_enter_safe_mode(hwmgr->device, true);
- cgs_lock_grbm_idx(hwmgr->device, true);
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ mutex_lock(&adev->grbm_idx_mutex);
value = 0;
value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
for (count = 0; count < num_se; count++) {
@@ -768,6 +952,11 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris12);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+ } else if (hwmgr->chip_id == CHIP_VEGAM) {
+ result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_VegaM);
+ PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+ result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_VegaM);
+ PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
}
}
cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);
@@ -781,8 +970,8 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == result),
"Failed to enable DPM DIDT.", return result);
}
- cgs_lock_grbm_idx(hwmgr->device, false);
- cgs_enter_safe_mode(hwmgr->device, false);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
}
return 0;
@@ -791,13 +980,14 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
{
int result;
+ struct amdgpu_device *adev = hwmgr->adev;
if (PP_CAP(PHM_PlatformCaps_SQRamping) ||
PP_CAP(PHM_PlatformCaps_DBRamping) ||
PP_CAP(PHM_PlatformCaps_TDRamping) ||
PP_CAP(PHM_PlatformCaps_TCPRamping)) {
- cgs_enter_safe_mode(hwmgr->device, true);
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
result = smu7_enable_didt(hwmgr, false);
PP_ASSERT_WITH_CODE((result == 0),
@@ -809,7 +999,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == result),
"Failed to disable DPM DIDT.", return result);
}
- cgs_enter_safe_mode(hwmgr->device, false);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
}
return 0;
@@ -852,12 +1042,10 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- n = (n & 0xff) << 8;
-
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit)
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PkgPwrSetLimit, n);
+ PPSMC_MSG_PkgPwrSetLimit, n<<8);
return 0;
}
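
Dropping the & 0xff mask matters for limits above 255: the old code truncated the value before shifting it into the message parameter. Worked comparison for n = 300:

	/* old: (300 & 0xff) << 8 = 44 << 8 = 0x2c00     limit silently became 44
	 * new:  300 << 8         = 0x12c00              full value reaches the SMC
	 */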
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 7b26607..50690c7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -314,8 +314,7 @@ static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
uint8_t frev, crev;
uint16_t size;
- info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *) cgs_atom_get_data_table(
- hwmgr->device,
+ info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *)smu_atom_get_data_table(hwmgr->adev,
GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
&size, &frev, &crev);
@@ -694,7 +693,7 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
else
data->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;
- clock = hwmgr->display_config.min_core_set_clock;
+ clock = hwmgr->display_config->min_core_set_clock;
if (clock == 0)
pr_debug("min_core_set_clock not set\n");
@@ -749,7 +748,7 @@ static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
- uint32_t clks = hwmgr->display_config.min_core_set_clock_in_sr;
+ uint32_t clks = hwmgr->display_config->min_core_set_clock_in_sr;
if (clks == 0)
clks = SMU8_MIN_DEEP_SLEEP_SCLK;
@@ -1041,25 +1040,21 @@ static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct smu8_hwmgr *data = hwmgr->backend;
struct PP_Clocks clocks = {0, 0, 0, 0};
bool force_high;
- uint32_t num_of_active_displays = 0;
- struct cgs_display_info info = {0};
smu8_ps->need_dfs_bypass = true;
data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
- clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ?
- hwmgr->display_config.min_mem_set_clock :
+ clocks.memoryClock = hwmgr->display_config->min_mem_set_clock != 0 ?
+ hwmgr->display_config->min_mem_set_clock :
data->sys_info.nbp_memory_clock[1];
- cgs_get_active_displays_info(hwmgr->device, &info);
- num_of_active_displays = info.display_count;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
force_high = (clocks.memoryClock > data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1])
- || (num_of_active_displays >= 3);
+ || (hwmgr->display_config->num_display >= 3);
smu8_ps->action = smu8_current_ps->action;
@@ -1897,20 +1892,20 @@ static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
data->uvd_power_gated = bgate;
if (bgate) {
- cgs_set_powergating_state(hwmgr->device,
+ amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
- cgs_set_clockgating_state(hwmgr->device,
+ amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
smu8_dpm_update_uvd_dpm(hwmgr, true);
smu8_dpm_powerdown_uvd(hwmgr);
} else {
smu8_dpm_powerup_uvd(hwmgr);
- cgs_set_clockgating_state(hwmgr->device,
+ amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
- cgs_set_powergating_state(hwmgr->device,
+ amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_UNGATE);
smu8_dpm_update_uvd_dpm(hwmgr, false);
@@ -1923,12 +1918,10 @@ static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
struct smu8_hwmgr *data = hwmgr->backend;
if (bgate) {
- cgs_set_powergating_state(
- hwmgr->device,
+ amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
- cgs_set_clockgating_state(
- hwmgr->device,
+ amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
smu8_enable_disable_vce_dpm(hwmgr, false);
@@ -1937,12 +1930,10 @@ static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
} else {
smu8_dpm_powerup_vce(hwmgr);
data->vce_power_gated = false;
- cgs_set_clockgating_state(
- hwmgr->device,
+ amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
- cgs_set_powergating_state(
- hwmgr->device,
+ amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_UNGATE);
smu8_dpm_update_vce_dpm(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index 5981228..93a3d02 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -24,6 +24,7 @@
#include "pp_debug.h"
#include "ppatomctrl.h"
#include "ppsmc.h"
+#include "atom.h"
uint8_t convert_to_vid(uint16_t vddc)
{
@@ -608,3 +609,100 @@ int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr)
return 0;
}
+
+void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
+ uint8_t *frev, uint8_t *crev)
+{
+ struct amdgpu_device *adev = dev;
+ uint16_t data_start;
+
+ if (amdgpu_atom_parse_data_header(
+ adev->mode_info.atom_context, table, size,
+ frev, crev, &data_start))
+ return (uint8_t *)adev->mode_info.atom_context->bios +
+ data_start;
+
+ return NULL;
+}
+
+int smu_get_voltage_dependency_table_ppt_v1(
+ const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
+{
+ uint8_t i = 0;
+ PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count),
+ "Voltage Lookup Table empty",
+ return -EINVAL);
+
+ dep_table->count = allowed_dep_table->count;
+ for (i=0; i<dep_table->count; i++) {
+ dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
+ dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
+ dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
+ dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc;
+ dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx;
+ dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci;
+ dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd;
+ dep_table->entries[i].phases = allowed_dep_table->entries[i].phases;
+ dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable;
+ dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset;
+ }
+
+ return 0;
+}
+
+int smu_set_watermarks_for_clocks_ranges(void *wt_table,
+ struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+{
+ uint32_t i;
+ struct watermarks *table = wt_table;
+
+ if (!table || !wm_with_clock_ranges)
+ return -EINVAL;
+
+ if (wm_with_clock_ranges->num_wm_sets_dmif > 4 || wm_with_clock_ranges->num_wm_sets_mcif > 4)
+ return -EINVAL;
+
+ for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
+ table->WatermarkRow[1][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
+ 100);
+ table->WatermarkRow[1][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
+ 100);
+ table->WatermarkRow[1][i].MinUclk =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
+ 100);
+ table->WatermarkRow[1][i].MaxUclk =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
+ 100);
+ table->WatermarkRow[1][i].WmSetting = (uint8_t)
+ wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
+ }
+
+ for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
+ table->WatermarkRow[0][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
+ 100);
+ table->WatermarkRow[0][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
+ 100);
+ table->WatermarkRow[0][i].MinUclk =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
+ 100);
+ table->WatermarkRow[0][i].MaxUclk =
+ cpu_to_le16((uint16_t)
+ (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
+ 100);
+ table->WatermarkRow[0][i].WmSetting = (uint8_t)
+ wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
+ }
+ return 0;
+}
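
The packing stores each bound in 10 kHz units as a 16-bit little-endian field, DMIF sets in WatermarkRow[1] and MCIF sets in WatermarkRow[0]. Note that the (uint16_t) cast binds before the division, so kHz values above 65535 truncate before the /100; dividing first would avoid that:

	/* intended:   600000 kHz / 100 = 6000 -> cpu_to_le16(6000)
	 * as written: (uint16_t)600000 = 10176, then 10176 / 100 = 101
	 * safer form:
	 *   cpu_to_le16((uint16_t)(wm_min_dcefclk_in_khz / 100));
	 */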
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
index d37d16e..916cc01 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
@@ -26,10 +26,27 @@
struct pp_atomctrl_voltage_table;
struct pp_hwmgr;
struct phm_ppt_v1_voltage_lookup_table;
+struct Watermarks_t;
+struct pp_wm_sets_with_clock_ranges_soc15;
uint8_t convert_to_vid(uint16_t vddc);
uint16_t convert_to_vddc(uint8_t vid);
+struct watermark_row_generic_t {
+ uint16_t MinClock;
+ uint16_t MaxClock;
+ uint16_t MinUclk;
+ uint16_t MaxUclk;
+
+ uint8_t WmSetting;
+ uint8_t Padding[3];
+};
+
+struct watermarks {
+ struct watermark_row_generic_t WatermarkRow[2][4];
+ uint32_t padding[7];
+};
+
extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
uint32_t index,
uint32_t value, uint32_t mask);
@@ -82,6 +99,16 @@ int phm_irq_process(struct amdgpu_device *adev,
int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr);
+void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
+ uint8_t *frev, uint8_t *crev);
+
+int smu_get_voltage_dependency_table_ppt_v1(
+ const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table);
+
+int smu_set_watermarks_for_clocks_ranges(void *wt_table,
+ struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+
#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 7cbb56b..d156b7b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -36,7 +36,7 @@
#include "smu9.h"
#include "smu9_driver_if.h"
#include "vega10_inc.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
#include "pppcielanes.h"
#include "vega10_hwmgr.h"
#include "vega10_processpptables.h"
@@ -51,10 +51,6 @@
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
-#define VOLTAGE_SCALE 4
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-
#define HBM_MEMORY_CHANNEL_WIDTH 128
static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
@@ -79,8 +75,6 @@ static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
-static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, uint32_t mask);
static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
@@ -291,6 +285,48 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
return 0;
}
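+/*
+ * Seed the OverDrive (ODN) tables from the pptable defaults: copy the VDDC
+ * lookup table and the sclk/mclk/socclk dependency tables, fall back to the
+ * sclk table when the AVFS min/max VDDC fuses are missing or implausible
+ * (> 2000 mV), and raise the last socclk entry to the memory OverDrive limit.
+ */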
+static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ struct phm_ppt_v2_information *table_info =
+ (struct phm_ppt_v2_information *)(hwmgr->pptable);
+ struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+ struct vega10_odn_vddc_lookup_table *od_lookup_table;
+ struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
+ struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
+ uint32_t i;
+
+ od_lookup_table = &odn_table->vddc_lookup_table;
+ vddc_lookup_table = table_info->vddc_lookup_table;
+
+ for (i = 0; i < vddc_lookup_table->count; i++)
+ od_lookup_table->entries[i].us_vdd = vddc_lookup_table->entries[i].us_vdd;
+
+ od_lookup_table->count = vddc_lookup_table->count;
+
+ dep_table[0] = table_info->vdd_dep_on_sclk;
+ dep_table[1] = table_info->vdd_dep_on_mclk;
+ dep_table[2] = table_info->vdd_dep_on_socclk;
+ od_table[0] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_sclk;
+ od_table[1] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_mclk;
+ od_table[2] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_socclk;
+
+ for (i = 0; i < 3; i++)
+ smu_get_voltage_dependency_table_ppt_v1(dep_table[i], od_table[i]);
+
+ if (odn_table->max_vddc == 0 || odn_table->max_vddc > 2000)
+ odn_table->max_vddc = dep_table[0]->entries[dep_table[0]->count - 1].vddc;
+ if (odn_table->min_vddc == 0 || odn_table->min_vddc > 2000)
+ odn_table->min_vddc = dep_table[0]->entries[0].vddc;
+
+ i = od_table[2]->count - 1;
+ od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock;
+ od_table[2]->entries[i].vddc = odn_table->max_vddc;
+
+ return 0;
+}
+
static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
@@ -427,7 +463,6 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
/* ACG firmware has major version 5 */
if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
data->smu_features[GNLD_ACG].supported = true;
-
if (data->registry_data.didt_support)
data->smu_features[GNLD_DIDT].supported = true;
@@ -754,7 +789,6 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
uint32_t config_telemetry = 0;
struct pp_atomfwctrl_voltage_table vol_table;
struct amdgpu_device *adev = hwmgr->adev;
- uint32_t reg;
data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
if (data == NULL)
@@ -860,10 +894,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
advanceFanControlParameters.usFanPWMMinLimit *
hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
- reg = soc15_get_register_offset(DF_HWID, 0,
- mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
- mmDF_CS_AON0_DramBaseAddress0);
- data->mem_channels = (cgs_read_register(hwmgr->device, reg) &
+ data->mem_channels = (RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0) &
DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
@@ -1370,48 +1401,6 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
memcpy(&(data->golden_dpm_table), &(data->dpm_table),
sizeof(struct vega10_dpm_table));
- if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
- PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
- data->odn_dpm_table.odn_core_clock_dpm_levels.num_of_pl =
- data->dpm_table.gfx_table.count;
- for (i = 0; i < data->dpm_table.gfx_table.count; i++) {
- data->odn_dpm_table.odn_core_clock_dpm_levels.entries[i].clock =
- data->dpm_table.gfx_table.dpm_levels[i].value;
- data->odn_dpm_table.odn_core_clock_dpm_levels.entries[i].enabled = true;
- }
-
- data->odn_dpm_table.vdd_dependency_on_sclk.count =
- dep_gfx_table->count;
- for (i = 0; i < dep_gfx_table->count; i++) {
- data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].clk =
- dep_gfx_table->entries[i].clk;
- data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].vddInd =
- dep_gfx_table->entries[i].vddInd;
- data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_enable =
- dep_gfx_table->entries[i].cks_enable;
- data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_voffset =
- dep_gfx_table->entries[i].cks_voffset;
- }
-
- data->odn_dpm_table.odn_memory_clock_dpm_levels.num_of_pl =
- data->dpm_table.mem_table.count;
- for (i = 0; i < data->dpm_table.mem_table.count; i++) {
- data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[i].clock =
- data->dpm_table.mem_table.dpm_levels[i].value;
- data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[i].enabled = true;
- }
-
- data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count;
- for (i = 0; i < dep_mclk_table->count; i++) {
- data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].clk =
- dep_mclk_table->entries[i].clk;
- data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddInd =
- dep_mclk_table->entries[i].vddInd;
- data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddci =
- dep_mclk_table->entries[i].vddci;
- }
- }
-
return 0;
}
@@ -1514,18 +1503,18 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
{
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk =
- table_info->vdd_dep_on_sclk;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk;
struct vega10_hwmgr *data = hwmgr->backend;
struct pp_atomfwctrl_clock_dividers_soc15 dividers;
uint32_t gfx_max_clock =
hwmgr->platform_descriptor.overdriveLimit.engineClock;
uint32_t i = 0;
- if (data->apply_overdrive_next_settings_mask &
- DPMTABLE_OD_UPDATE_VDDC)
+ if (hwmgr->od_enabled)
dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
- &(data->odn_dpm_table.vdd_dependency_on_sclk);
+ &(data->odn_dpm_table.vdd_dep_on_sclk);
+ else
+ dep_on_sclk = table_info->vdd_dep_on_sclk;
PP_ASSERT_WITH_CODE(dep_on_sclk,
"Invalid SOC_VDD-GFX_CLK Dependency Table!",
@@ -1577,23 +1566,32 @@ static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
uint32_t soc_clock, uint8_t *current_soc_did,
uint8_t *current_vol_index)
{
+ struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc =
- table_info->vdd_dep_on_socclk;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc;
struct pp_atomfwctrl_clock_dividers_soc15 dividers;
uint32_t i;
- PP_ASSERT_WITH_CODE(dep_on_soc,
- "Invalid SOC_VDD-SOC_CLK Dependency Table!",
- return -EINVAL);
- for (i = 0; i < dep_on_soc->count; i++) {
- if (dep_on_soc->entries[i].clk == soc_clock)
- break;
+ if (hwmgr->od_enabled) {
+ dep_on_soc = (struct phm_ppt_v1_clock_voltage_dependency_table *)
+ &data->odn_dpm_table.vdd_dep_on_socclk;
+ for (i = 0; i < dep_on_soc->count; i++) {
+ if (dep_on_soc->entries[i].clk >= soc_clock)
+ break;
+ }
+ } else {
+ dep_on_soc = table_info->vdd_dep_on_socclk;
+ for (i = 0; i < dep_on_soc->count; i++) {
+ if (dep_on_soc->entries[i].clk == soc_clock)
+ break;
+ }
}
+
PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
"Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
return -EINVAL);
+
PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
soc_clock, &dividers),
@@ -1602,22 +1600,6 @@ static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
*current_soc_did = (uint8_t)dividers.ulDid;
*current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
-
- return 0;
-}
-
-uint16_t vega10_locate_vddc_given_clock(struct pp_hwmgr *hwmgr,
- uint32_t clk,
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
-{
- uint16_t i;
-
- for (i = 0; i < dep_table->count; i++) {
- if (dep_table->entries[i].clk == clk)
- return dep_table->entries[i].vddc;
- }
-
- pr_info("[LocateVddcGivenClock] Cannot locate SOC Vddc for this clock!");
return 0;
}
@@ -1631,8 +1613,6 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
- table_info->vdd_dep_on_socclk;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
int result = 0;
@@ -1663,11 +1643,6 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
dpm_table = &(data->dpm_table.soc_table);
for (i = 0; i < dpm_table->count; i++) {
- pp_table->SocVid[i] =
- (uint8_t)convert_to_vid(
- vega10_locate_vddc_given_clock(hwmgr,
- dpm_table->dpm_levels[i].value,
- dep_table));
result = vega10_populate_single_soc_level(hwmgr,
dpm_table->dpm_levels[i].value,
&(pp_table->SocclkDid[i]),
@@ -1678,7 +1653,6 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
j = i - 1;
while (i < NUM_SOCCLK_DPM_LEVELS) {
- pp_table->SocVid[i] = pp_table->SocVid[j];
result = vega10_populate_single_soc_level(hwmgr,
dpm_table->dpm_levels[j].value,
&(pp_table->SocclkDid[i]),
@@ -1691,6 +1665,32 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
return result;
}
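+/*
+ * Fill pp_table->SocVid from the active VDDC lookup table (the ODN copy when
+ * OverDrive is enabled), padding the remaining SMU levels with the last VID.
+ */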
+static void vega10_populate_vddc_soc_levels(struct pp_hwmgr *hwmgr)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+ struct phm_ppt_v2_information *table_info = hwmgr->pptable;
+ struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
+
+ uint8_t soc_vid = 0;
+ uint32_t i, max_vddc_level;
+
+ if (hwmgr->od_enabled)
+ vddc_lookup_table = (struct phm_ppt_v1_voltage_lookup_table *)&data->odn_dpm_table.vddc_lookup_table;
+ else
+ vddc_lookup_table = table_info->vddc_lookup_table;
+
+ max_vddc_level = vddc_lookup_table->count;
+ for (i = 0; i < max_vddc_level; i++) {
+ soc_vid = (uint8_t)convert_to_vid(vddc_lookup_table->entries[i].us_vdd);
+ pp_table->SocVid[i] = soc_vid;
+ }
+ while (i < MAX_REGULAR_DPM_NUMBER) {
+ pp_table->SocVid[i] = soc_vid;
+ i++;
+ }
+}
+
/**
* @brief Populates single SMC GFXCLK structure using the provided clock.
*
@@ -1705,25 +1705,25 @@ static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk =
- table_info->vdd_dep_on_mclk;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk;
struct pp_atomfwctrl_clock_dividers_soc15 dividers;
uint32_t mem_max_clock =
hwmgr->platform_descriptor.overdriveLimit.memoryClock;
uint32_t i = 0;
- if (data->apply_overdrive_next_settings_mask &
- DPMTABLE_OD_UPDATE_VDDC)
+ if (hwmgr->od_enabled)
dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
- &data->odn_dpm_table.vdd_dependency_on_mclk;
+ &data->odn_dpm_table.vdd_dep_on_mclk;
+ else
+ dep_on_mclk = table_info->vdd_dep_on_mclk;
PP_ASSERT_WITH_CODE(dep_on_mclk,
"Invalid SOC_VDD-UCLK Dependency Table!",
return -EINVAL);
- if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
+ if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock;
- else {
+ } else {
for (i = 0; i < dep_on_mclk->count; i++) {
if (dep_on_mclk->entries[i].clk == mem_clock)
break;
@@ -2067,6 +2067,9 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
if (data->smu_features[GNLD_AVFS].supported) {
result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
if (!result) {
+ data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
+ data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
+
pp_table->MinVoltageVid = (uint8_t)
convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
pp_table->MaxVoltageVid = (uint8_t)
@@ -2345,6 +2348,22 @@ static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
return 0;
}
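+/*
+ * AVFS policy after a DPM table change: a user VDDC override disables AVFS
+ * entirely, any other pending table update cycles AVFS off and on so the
+ * fused curves are re-evaluated against the new clocks, and an unchanged
+ * table simply keeps AVFS enabled.
+ */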
+static int vega10_update_avfs(struct pp_hwmgr *hwmgr)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+
+ if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+ vega10_avfs_enable(hwmgr, false);
+ } else if (data->need_update_dpm_table) {
+ vega10_avfs_enable(hwmgr, false);
+ vega10_avfs_enable(hwmgr, true);
+ } else {
+ vega10_avfs_enable(hwmgr, true);
+ }
+
+ return 0;
+}
+
static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
{
int result = 0;
@@ -2406,6 +2425,10 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
"Failed to setup default DPM tables!",
return result);
+ /* initialize ODN table */
+ if (hwmgr->od_enabled)
+ vega10_odn_initial_default_setting(hwmgr);
+
pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
VOLTAGE_OBJ_SVID2, &voltage_table);
pp_table->MaxVidStep = voltage_table.max_vid_step;
@@ -2452,6 +2475,8 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
"Failed to initialize Memory Level!",
return result);
+ vega10_populate_vddc_soc_levels(hwmgr);
+
result = vega10_populate_all_display_clock_levels(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize Display Level!",
@@ -2481,6 +2506,12 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
+ pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
+ SMU9_SYSPLL0_SOCCLK_ID, &boot_up_values.ulSocClk);
+
+ pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
+ SMU9_SYSPLL0_DCEFCLK_ID, &boot_up_values.ulDCEFClk);
+
data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
if (0 != boot_up_values.usVddc) {
@@ -2829,7 +2860,7 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
tmp_result = vega10_construct_voltage_tables(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
- "Failed to contruct voltage tables!",
+ "Failed to construct voltage tables!",
result = tmp_result);
tmp_result = vega10_init_smc_table(hwmgr);
@@ -3028,7 +3059,6 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
bool disable_mclk_switching_for_frame_lock;
bool disable_mclk_switching_for_vr;
bool force_mclk_high;
- struct cgs_display_info info = {0};
const struct phm_clock_and_voltage_limits *max_limits;
uint32_t i;
struct vega10_hwmgr *data = hwmgr->backend;
@@ -3063,11 +3093,9 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
}
}
- cgs_get_active_displays_info(hwmgr->device, &info);
-
/* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
- minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
- minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
+ minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
+ minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
if (PP_CAP(PHM_PlatformCaps_StablePState)) {
stable_pstate_sclk_dpm_percentage =
@@ -3107,10 +3135,10 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
- if (info.display_count == 0)
+ if (hwmgr->display_config->num_display == 0)
disable_mclk_switching = false;
else
- disable_mclk_switching = (info.display_count > 1) ||
+ disable_mclk_switching = (hwmgr->display_config->num_display > 1) ||
disable_mclk_switching_for_frame_lock ||
disable_mclk_switching_for_vr ||
force_mclk_high;
@@ -3171,87 +3199,11 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- const struct vega10_power_state *vega10_ps =
- cast_const_phw_vega10_power_state(states->pnew_state);
struct vega10_hwmgr *data = hwmgr->backend;
- struct vega10_single_dpm_table *sclk_table =
- &(data->dpm_table.gfx_table);
- uint32_t sclk = vega10_ps->performance_levels
- [vega10_ps->performance_level_count - 1].gfx_clock;
- struct vega10_single_dpm_table *mclk_table =
- &(data->dpm_table.mem_table);
- uint32_t mclk = vega10_ps->performance_levels
- [vega10_ps->performance_level_count - 1].mem_clock;
- struct PP_Clocks min_clocks = {0};
- uint32_t i;
- struct cgs_display_info info = {0};
-
- data->need_update_dpm_table = 0;
-
- if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
- PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
- for (i = 0; i < sclk_table->count; i++) {
- if (sclk == sclk_table->dpm_levels[i].value)
- break;
- }
-
- if (!(data->apply_overdrive_next_settings_mask &
- DPMTABLE_OD_UPDATE_SCLK) && i >= sclk_table->count) {
- /* Check SCLK in DAL's minimum clocks
- * in case DeepSleep divider update is required.
- */
- if (data->display_timing.min_clock_in_sr !=
- min_clocks.engineClockInSR &&
- (min_clocks.engineClockInSR >=
- VEGA10_MINIMUM_ENGINE_CLOCK ||
- data->display_timing.min_clock_in_sr >=
- VEGA10_MINIMUM_ENGINE_CLOCK))
- data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
- }
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (data->display_timing.num_existing_displays !=
- info.display_count)
- data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
- } else {
- for (i = 0; i < sclk_table->count; i++) {
- if (sclk == sclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= sclk_table->count)
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- else {
- /* Check SCLK in DAL's minimum clocks
- * in case DeepSleep divider update is required.
- */
- if (data->display_timing.min_clock_in_sr !=
- min_clocks.engineClockInSR &&
- (min_clocks.engineClockInSR >=
- VEGA10_MINIMUM_ENGINE_CLOCK ||
- data->display_timing.min_clock_in_sr >=
- VEGA10_MINIMUM_ENGINE_CLOCK))
- data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
- }
- for (i = 0; i < mclk_table->count; i++) {
- if (mclk == mclk_table->dpm_levels[i].value)
- break;
- }
-
- cgs_get_active_displays_info(hwmgr->device, &info);
+ if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
+ data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
- if (i >= mclk_table->count)
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
- if (data->display_timing.num_existing_displays !=
- info.display_count ||
- i >= mclk_table->count)
- data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
- }
return 0;
}
@@ -3259,194 +3211,29 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
struct pp_hwmgr *hwmgr, const void *input)
{
int result = 0;
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- const struct vega10_power_state *vega10_ps =
- cast_const_phw_vega10_power_state(states->pnew_state);
struct vega10_hwmgr *data = hwmgr->backend;
- uint32_t sclk = vega10_ps->performance_levels
- [vega10_ps->performance_level_count - 1].gfx_clock;
- uint32_t mclk = vega10_ps->performance_levels
- [vega10_ps->performance_level_count - 1].mem_clock;
- struct vega10_dpm_table *dpm_table = &data->dpm_table;
- struct vega10_dpm_table *golden_dpm_table =
- &data->golden_dpm_table;
- uint32_t dpm_count, clock_percent;
- uint32_t i;
- if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
- PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
-
- if (!data->need_update_dpm_table &&
- !data->apply_optimized_settings &&
- !data->apply_overdrive_next_settings_mask)
- return 0;
-
- if (data->apply_overdrive_next_settings_mask &
- DPMTABLE_OD_UPDATE_SCLK) {
- for (dpm_count = 0;
- dpm_count < dpm_table->gfx_table.count;
- dpm_count++) {
- dpm_table->gfx_table.dpm_levels[dpm_count].enabled =
- data->odn_dpm_table.odn_core_clock_dpm_levels.entries[dpm_count].enabled;
- dpm_table->gfx_table.dpm_levels[dpm_count].value =
- data->odn_dpm_table.odn_core_clock_dpm_levels.entries[dpm_count].clock;
- }
- }
-
- if (data->apply_overdrive_next_settings_mask &
- DPMTABLE_OD_UPDATE_MCLK) {
- for (dpm_count = 0;
- dpm_count < dpm_table->mem_table.count;
- dpm_count++) {
- dpm_table->mem_table.dpm_levels[dpm_count].enabled =
- data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[dpm_count].enabled;
- dpm_table->mem_table.dpm_levels[dpm_count].value =
- data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[dpm_count].clock;
- }
- }
-
- if ((data->need_update_dpm_table & DPMTABLE_UPDATE_SCLK) ||
- data->apply_optimized_settings ||
- (data->apply_overdrive_next_settings_mask &
- DPMTABLE_OD_UPDATE_SCLK)) {
- result = vega10_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
- return result);
- }
-
- if ((data->need_update_dpm_table & DPMTABLE_UPDATE_MCLK) ||
- (data->apply_overdrive_next_settings_mask &
- DPMTABLE_OD_UPDATE_MCLK)){
- result = vega10_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
- return result);
- }
- } else {
- if (!data->need_update_dpm_table &&
- !data->apply_optimized_settings)
- return 0;
-
- if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK &&
- data->smu_features[GNLD_DPM_GFXCLK].supported) {
- dpm_table->
- gfx_table.dpm_levels[dpm_table->gfx_table.count - 1].
- value = sclk;
- if (hwmgr->od_enabled) {
- /* Need to do calculation based on the golden DPM table
- * as the Heatmap GPU Clock axis is also based on
- * the default values
- */
- PP_ASSERT_WITH_CODE(
- golden_dpm_table->gfx_table.dpm_levels
- [golden_dpm_table->gfx_table.count - 1].value,
- "Divide by 0!",
- return -1);
-
- dpm_count = dpm_table->gfx_table.count < 2 ?
- 0 : dpm_table->gfx_table.count - 2;
- for (i = dpm_count; i > 1; i--) {
- if (sclk > golden_dpm_table->gfx_table.dpm_levels
- [golden_dpm_table->gfx_table.count - 1].value) {
- clock_percent =
- ((sclk - golden_dpm_table->gfx_table.dpm_levels
- [golden_dpm_table->gfx_table.count - 1].value) *
- 100) /
- golden_dpm_table->gfx_table.dpm_levels
- [golden_dpm_table->gfx_table.count - 1].value;
-
- dpm_table->gfx_table.dpm_levels[i].value =
- golden_dpm_table->gfx_table.dpm_levels[i].value +
- (golden_dpm_table->gfx_table.dpm_levels[i].value *
- clock_percent) / 100;
- } else if (golden_dpm_table->
- gfx_table.dpm_levels[dpm_table->gfx_table.count-1].value >
- sclk) {
- clock_percent =
- ((golden_dpm_table->gfx_table.dpm_levels
- [golden_dpm_table->gfx_table.count - 1].value -
- sclk) * 100) /
- golden_dpm_table->gfx_table.dpm_levels
- [golden_dpm_table->gfx_table.count-1].value;
-
- dpm_table->gfx_table.dpm_levels[i].value =
- golden_dpm_table->gfx_table.dpm_levels[i].value -
- (golden_dpm_table->gfx_table.dpm_levels[i].value *
- clock_percent) / 100;
- } else
- dpm_table->gfx_table.dpm_levels[i].value =
- golden_dpm_table->gfx_table.dpm_levels[i].value;
- }
- }
- }
+ if (!data->need_update_dpm_table)
+ return 0;
- if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK &&
- data->smu_features[GNLD_DPM_UCLK].supported) {
- dpm_table->
- mem_table.dpm_levels[dpm_table->mem_table.count - 1].
- value = mclk;
+ if (data->need_update_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK | DPMTABLE_UPDATE_SOCCLK)) {
+ result = vega10_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
+ return result);
+ }
- if (hwmgr->od_enabled) {
- PP_ASSERT_WITH_CODE(
- golden_dpm_table->mem_table.dpm_levels
- [golden_dpm_table->mem_table.count - 1].value,
- "Divide by 0!",
- return -1);
+ if (data->need_update_dpm_table &
+ (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
+ result = vega10_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
+ return result);
+ }
- dpm_count = dpm_table->mem_table.count < 2 ?
- 0 : dpm_table->mem_table.count - 2;
- for (i = dpm_count; i > 1; i--) {
- if (mclk > golden_dpm_table->mem_table.dpm_levels
- [golden_dpm_table->mem_table.count-1].value) {
- clock_percent = ((mclk -
- golden_dpm_table->mem_table.dpm_levels
- [golden_dpm_table->mem_table.count-1].value) *
- 100) /
- golden_dpm_table->mem_table.dpm_levels
- [golden_dpm_table->mem_table.count-1].value;
-
- dpm_table->mem_table.dpm_levels[i].value =
- golden_dpm_table->mem_table.dpm_levels[i].value +
- (golden_dpm_table->mem_table.dpm_levels[i].value *
- clock_percent) / 100;
- } else if (golden_dpm_table->mem_table.dpm_levels
- [dpm_table->mem_table.count-1].value > mclk) {
- clock_percent = ((golden_dpm_table->mem_table.dpm_levels
- [golden_dpm_table->mem_table.count-1].value - mclk) *
- 100) /
- golden_dpm_table->mem_table.dpm_levels
- [golden_dpm_table->mem_table.count-1].value;
-
- dpm_table->mem_table.dpm_levels[i].value =
- golden_dpm_table->mem_table.dpm_levels[i].value -
- (golden_dpm_table->mem_table.dpm_levels[i].value *
- clock_percent) / 100;
- } else
- dpm_table->mem_table.dpm_levels[i].value =
- golden_dpm_table->mem_table.dpm_levels[i].value;
- }
- }
- }
+ vega10_populate_vddc_soc_levels(hwmgr);
- if ((data->need_update_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) ||
- data->apply_optimized_settings) {
- result = vega10_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
- return result);
- }
-
- if (data->need_update_dpm_table &
- (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
- result = vega10_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
- return result);
- }
- }
return result;
}
@@ -3742,8 +3529,9 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
- data->apply_optimized_settings = false;
- data->apply_overdrive_next_settings_mask = 0;
+ vega10_update_avfs(hwmgr);
+
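+ /* keep only the VDDC override bit so AVFS stays disabled on the next apply */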
+ data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
return 0;
}
@@ -3793,16 +3581,18 @@ static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
}
static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
- struct pp_gpu_power *query)
+ uint32_t *query)
{
uint32_t value;
+ if (!query)
+ return -EINVAL;
+
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr);
value = smum_get_argument(hwmgr);
- /* power value is an integer */
- memset(query, 0, sizeof *query);
- query->average_gpu_power = value << 8;
+ /* the SMC reports watts; shift left by 8 so the low 8 bits are fractional, matching legacy ASICs */
+ *query = value << 8;
return 0;
}
@@ -3810,22 +3600,18 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
void *value, int *size)
{
- uint32_t sclk_idx, mclk_idx, activity_percent = 0;
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t sclk_mhz, mclk_idx, activity_percent = 0;
struct vega10_hwmgr *data = hwmgr->backend;
struct vega10_dpm_table *dpm_table = &data->dpm_table;
int ret = 0;
- uint32_t reg, val_vid;
+ uint32_t val_vid;
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
- sclk_idx = smum_get_argument(hwmgr);
- if (sclk_idx < dpm_table->gfx_table.count) {
- *((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value;
- *size = 4;
- } else {
- ret = -EINVAL;
- }
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency);
+ sclk_mhz = smum_get_argument(hwmgr);
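+ /* the SMU reports MHz; scale to the 10 kHz units used by the DPM tables */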
+ *((uint32_t *)value) = sclk_mhz * 100;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
@@ -3856,18 +3642,10 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_POWER:
- if (*size < sizeof(struct pp_gpu_power))
- ret = -EINVAL;
- else {
- *size = sizeof(struct pp_gpu_power);
- ret = vega10_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
- }
+ ret = vega10_get_gpu_power(hwmgr, (uint32_t *)value);
break;
case AMDGPU_PP_SENSOR_VDDGFX:
- reg = soc15_get_register_offset(SMUIO_HWID, 0,
- mmSMUSVI0_PLANE0_CURRENTVID_BASE_IDX,
- mmSMUSVI0_PLANE0_CURRENTVID);
- val_vid = (cgs_read_register(hwmgr->device, reg) &
+ val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_PLANE0_CURRENTVID) &
SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK) >>
SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT;
*((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid);
@@ -3956,26 +3734,18 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
(struct phm_ppt_v2_information *)hwmgr->pptable;
struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk;
uint32_t idx;
- uint32_t num_active_disps = 0;
- struct cgs_display_info info = {0};
struct PP_Clocks min_clocks = {0};
uint32_t i;
struct pp_display_clock_request clock_req;
- info.mode_info = NULL;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- num_active_disps = info.display_count;
-
- if (num_active_disps > 1)
+ if (hwmgr->display_config->num_display > 1)
vega10_notify_smc_display_change(hwmgr, false);
else
vega10_notify_smc_display_change(hwmgr, true);
- min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
- min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;
- min_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
+ min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
+ min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
+ min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
for (i = 0; i < dpm_table->count; i++) {
if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
@@ -4120,6 +3890,47 @@ static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
}
}
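+/*
+ * Clamp the SCLK/MCLK DPM range to the lowest and highest set bits of the
+ * user-supplied level mask and push the new boot/max levels to the SMU.
+ */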
+static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, uint32_t mask)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+
+ switch (type) {
+ case PP_SCLK:
+ data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
+ data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
+
+ PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
+ "Failed to upload boot level to lowest!",
+ return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
+ "Failed to upload dpm max level to highest!",
+ return -EINVAL);
+ break;
+
+ case PP_MCLK:
+ data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
+ data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
+
+ PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
+ "Failed to upload boot level to lowest!",
+ return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
+ "Failed to upload dpm max level to highest!",
+ return -EINVAL);
+
+ break;
+
+ case PP_PCIE:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
@@ -4356,97 +4167,15 @@ static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
struct vega10_hwmgr *data = hwmgr->backend;
Watermarks_t *table = &(data->smc_state_table.water_marks_table);
int result = 0;
- uint32_t i;
if (!data->registry_data.disable_water_mark) {
- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
- table->WatermarkRow[WM_DCEFCLK][i].MinClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
- wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
- }
-
- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
- table->WatermarkRow[WM_SOCCLK][i].MinClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].MaxClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].MinUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
- wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
- }
+ smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
data->water_marks_bitmap = WaterMarksExist;
}
return result;
}
-static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, uint32_t mask)
-{
- struct vega10_hwmgr *data = hwmgr->backend;
-
- switch (type) {
- case PP_SCLK:
- data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
- data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
-
- PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
- "Failed to upload boot level to lowest!",
- return -EINVAL);
-
- PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
- "Failed to upload dpm max level to highest!",
- return -EINVAL);
- break;
-
- case PP_MCLK:
- data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
- data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
-
- PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
- "Failed to upload boot level to lowest!",
- return -EINVAL);
-
- PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
- "Failed to upload dpm max level to highest!",
- return -EINVAL);
-
- break;
-
- case PP_PCIE:
- default:
- break;
- }
-
- return 0;
-}
-
static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, char *buf)
{
@@ -4454,6 +4183,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
+ struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
+
int i, now, size = 0;
switch (type) {
@@ -4492,6 +4223,40 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
(i == now) ? "*" : "");
break;
+ case OD_SCLK:
+ if (hwmgr->od_enabled) {
+ size = sprintf(buf, "%s:\n", "OD_SCLK");
+ podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
+ for (i = 0; i < podn_vdd_dep->count; i++)
+ size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
+ i, podn_vdd_dep->entries[i].clk / 100,
+ podn_vdd_dep->entries[i].vddc);
+ }
+ break;
+ case OD_MCLK:
+ if (hwmgr->od_enabled) {
+ size = sprintf(buf, "%s:\n", "OD_MCLK");
+ podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
+ for (i = 0; i < podn_vdd_dep->count; i++)
+ size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
+ i, podn_vdd_dep->entries[i].clk / 100,
+ podn_vdd_dep->entries[i].vddc);
+ }
+ break;
+ case OD_RANGE:
+ if (hwmgr->od_enabled) {
+ size = sprintf(buf, "%s:\n", "OD_RANGE");
+ size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+ data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
+ size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
+ data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
+ size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
+ data->odn_dpm_table.min_vddc,
+ data->odn_dpm_table.max_vddc);
+ }
+ break;
default:
break;
}
@@ -4501,10 +4266,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
- int result = 0;
- uint32_t num_turned_on_displays = 1;
Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
- struct cgs_display_info info = {0};
+ int result = 0;
if ((data->water_marks_bitmap & WaterMarksExist) &&
!(data->water_marks_bitmap & WaterMarksLoaded)) {
@@ -4514,10 +4277,8 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
}
if (data->water_marks_bitmap & WaterMarksLoaded) {
- cgs_get_active_displays_info(hwmgr->device, &info);
- num_turned_on_displays = info.display_count;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
+ PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
}
return result;
@@ -4603,15 +4364,12 @@ vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmg
{
struct vega10_hwmgr *data = hwmgr->backend;
bool is_update_required = false;
- struct cgs_display_info info = {0, 0, NULL};
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (data->display_timing.num_existing_displays != info.display_count)
+ if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
is_update_required = true;
if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) {
- if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
+ if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
is_update_required = true;
}
@@ -4886,6 +4644,200 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
return 0;
}
+
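+/*
+ * Validate one user OverDrive entry: the voltage must lie inside the
+ * [min_vddc, max_vddc] window and the clock between the lowest default DPM
+ * level and the board's OverDrive limit.
+ */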
+static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ uint32_t clk,
+ uint32_t voltage)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+ struct vega10_single_dpm_table *golden_table;
+
+ if (voltage < odn_table->min_vddc || voltage > odn_table->max_vddc) {
+ pr_info("OD voltage is out of range [%d - %d] mV\n", odn_table->min_vddc, odn_table->max_vddc);
+ return false;
+ }
+
+ if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
+ golden_table = &(data->golden_dpm_table.gfx_table);
+ if (golden_table->dpm_levels[0].value > clk ||
+ hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
+ pr_info("OD engine clock is out of range [%d - %d] MHz\n",
+ golden_table->dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
+ return false;
+ }
+ } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
+ golden_table = &(data->golden_dpm_table.mem_table);
+ if (golden_table->dpm_levels[0].value > clk ||
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
+ pr_info("OD memory clock is out of range [%d - %d] MHz\n",
+ golden_table->dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
+ return false;
+ }
+ } else {
+ return false;
+ }
+
+ return true;
+}
+
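+/*
+ * Compare the ODN dependency tables against the pptable defaults and set the
+ * matching OD_UPDATE flags; a leftover VDDC flag with no remaining table
+ * difference is downgraded to a plain SCLK + MCLK repopulate.
+ */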
+static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+ struct phm_ppt_v2_information *table_info = hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
+ struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
+ uint32_t i;
+
+ dep_table = table_info->vdd_dep_on_mclk;
+ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
+
+ for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
+ return;
+ }
+ }
+
+ dep_table = table_info->vdd_dep_on_sclk;
+ odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
+ for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
+ return;
+ }
+ }
+
+ if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+ data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
+ }
+}
+
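+/*
+ * Propagate an OverDrive edit into the shared voltage state: sclk edits
+ * update the VDDC lookup table, while mclk edits re-bind each entry's vddInd
+ * and pull the socclk dependency table and soc DPM levels up so socclk never
+ * trails the overdriven memory clock.
+ */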
+static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
+ enum PP_OD_DPM_TABLE_COMMAND type)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ struct phm_ppt_v2_information *table_info = hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = table_info->vdd_dep_on_socclk;
+ struct vega10_single_dpm_table *dpm_table = &data->golden_dpm_table.soc_table;
+
+ struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_on_socclk =
+ &data->odn_dpm_table.vdd_dep_on_socclk;
+ struct vega10_odn_vddc_lookup_table *od_vddc_lookup_table = &data->odn_dpm_table.vddc_lookup_table;
+
+ struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep;
+ uint8_t i, j;
+
+ if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
+ podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
+ for (i = 0; i < podn_vdd_dep->count - 1; i++)
+ od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
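+ /* the loop leaves i == count - 1; the last lookup entry is only ever raised */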
+ if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc)
+ od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
+ } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
+ podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
+ for (i = 0; i < dpm_table->count; i++) {
+ for (j = 0; j < od_vddc_lookup_table->count; j++) {
+ if (od_vddc_lookup_table->entries[j].us_vdd >
+ podn_vdd_dep->entries[i].vddc)
+ break;
+ }
+ if (j == od_vddc_lookup_table->count) {
+ od_vddc_lookup_table->entries[j-1].us_vdd =
+ podn_vdd_dep->entries[i].vddc;
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+ }
+ podn_vdd_dep->entries[i].vddInd = j;
+ }
+ dpm_table = &data->dpm_table.soc_table;
+ for (i = 0; i < dep_table->count; i++) {
+ if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[dep_table->count-1].vddInd &&
+ dep_table->entries[i].clk < podn_vdd_dep->entries[dep_table->count-1].clk) {
+ data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
+ podn_vdd_dep_on_socclk->entries[i].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
+ dpm_table->dpm_levels[i].value = podn_vdd_dep_on_socclk->entries[i].clk;
+ }
+ }
+ if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk <
+ podn_vdd_dep->entries[dep_table->count-1].clk) {
+ data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
+ podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
+ dpm_table->dpm_levels[podn_vdd_dep_on_socclk->count - 1].value = podn_vdd_dep->entries[dep_table->count-1].clk;
+ }
+ if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd <
+ podn_vdd_dep->entries[dep_table->count-1].vddInd) {
+ data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
+ podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd = podn_vdd_dep->entries[dep_table->count-1].vddInd;
+ }
+ }
+}
+
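+/*
+ * Backend for the pp_od_clk_voltage sysfs interface: parses (level, clock in
+ * MHz, voltage in mV) triples for the sclk/mclk tables, validates each entry,
+ * and handles the restore-default and commit commands.
+ */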
+static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_table;
+ struct vega10_single_dpm_table *dpm_table;
+
+ uint32_t input_clk;
+ uint32_t input_vol;
+ uint32_t input_level;
+ uint32_t i;
+
+ PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
+ return -EINVAL);
+
+ if (!hwmgr->od_enabled) {
+ pr_info("OverDrive feature not enabled\n");
+ return -EINVAL;
+ }
+
+ if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
+ dpm_table = &data->dpm_table.gfx_table;
+ podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_sclk;
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ } else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
+ dpm_table = &data->dpm_table.mem_table;
+ podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_mclk;
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+ } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
+ memcpy(&(data->dpm_table), &(data->golden_dpm_table), sizeof(struct vega10_dpm_table));
+ vega10_odn_initial_default_setting(hwmgr);
+ return 0;
+ } else if (PP_OD_COMMIT_DPM_TABLE == type) {
+ vega10_check_dpm_table_updated(hwmgr);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+
+ for (i = 0; i < size; i += 3) {
+ if (i + 3 > size || input[i] >= podn_vdd_dep_table->count) {
+ pr_info("invalid clock voltage input\n");
+ return 0;
+ }
+ input_level = input[i];
+ input_clk = input[i+1] * 100;
+ input_vol = input[i+2];
+
+ if (vega10_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
+ dpm_table->dpm_levels[input_level].value = input_clk;
+ podn_vdd_dep_table->entries[input_level].clk = input_clk;
+ podn_vdd_dep_table->entries[input_level].vddc = input_vol;
+ } else {
+ return -EINVAL;
+ }
+ }
+ vega10_odn_update_soc_table(hwmgr, type);
+ return 0;
+}
+
static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.backend_init = vega10_hwmgr_backend_init,
.backend_fini = vega10_hwmgr_backend_fini,
@@ -4944,6 +4896,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.get_power_profile_mode = vega10_get_power_profile_mode,
.set_power_profile_mode = vega10_set_power_profile_mode,
.set_power_limit = vega10_set_power_limit,
+ .odn_edit_dpm_table = vega10_odn_edit_dpm_table,
};
int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index 5339ea1..aadd6cb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -282,15 +282,21 @@ struct vega10_registry_data {
struct vega10_odn_clock_voltage_dependency_table {
uint32_t count;
- struct phm_ppt_v1_clock_voltage_dependency_record
- entries[MAX_REGULAR_DPM_NUMBER];
+ struct phm_ppt_v1_clock_voltage_dependency_record entries[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct vega10_odn_vddc_lookup_table {
+ uint32_t count;
+ struct phm_ppt_v1_voltage_lookup_record entries[MAX_REGULAR_DPM_NUMBER];
};
struct vega10_odn_dpm_table {
- struct phm_odn_clock_levels odn_core_clock_dpm_levels;
- struct phm_odn_clock_levels odn_memory_clock_dpm_levels;
- struct vega10_odn_clock_voltage_dependency_table vdd_dependency_on_sclk;
- struct vega10_odn_clock_voltage_dependency_table vdd_dependency_on_mclk;
+ struct vega10_odn_clock_voltage_dependency_table vdd_dep_on_sclk;
+ struct vega10_odn_clock_voltage_dependency_table vdd_dep_on_mclk;
+ struct vega10_odn_clock_voltage_dependency_table vdd_dep_on_socclk;
+ struct vega10_odn_vddc_lookup_table vddc_lookup_table;
+ uint32_t max_vddc;
+ uint32_t min_vddc;
};
struct vega10_odn_fan_table {
@@ -301,8 +307,8 @@ struct vega10_odn_fan_table {
};
struct vega10_hwmgr {
- struct vega10_dpm_table dpm_table;
- struct vega10_dpm_table golden_dpm_table;
+ struct vega10_dpm_table dpm_table;
+ struct vega10_dpm_table golden_dpm_table;
struct vega10_registry_data registry_data;
struct vega10_vbios_boot_state vbios_boot_state;
struct vega10_mclk_latency_table mclk_latency_table;
@@ -368,12 +374,8 @@ struct vega10_hwmgr {
bool need_long_memory_training;
/* Internal settings to apply the application power optimization parameters */
- bool apply_optimized_settings;
uint32_t disable_dpm_mask;
- /* ---- Overdrive next setting ---- */
- uint32_t apply_overdrive_next_settings_mask;
-
/* ---- SMU9 ---- */
struct smu_features smu_features[GNLD_FEATURES_MAX];
struct vega10_smc_state_table smc_state_table;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index ba63fae..a9efd855 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -27,7 +27,7 @@
#include "vega10_ppsmc.h"
#include "vega10_inc.h"
#include "pp_debug.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
static const struct vega10_didt_config_reg SEDiDtTuningCtrlConfig_Vega10[] =
{
@@ -888,36 +888,36 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
if (PP_CAP(PHM_PlatformCaps_DiDtEDCEnable)) {
if (PP_CAP(PHM_PlatformCaps_SQRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL);
- data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en);
- data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en);
+ data = REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en);
+ data = REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data);
}
if (PP_CAP(PHM_PlatformCaps_DBRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL);
- data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en);
- data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en);
+ data = REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en);
+ data = REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data);
}
if (PP_CAP(PHM_PlatformCaps_TDRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL);
- data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en);
- data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en);
+ data = REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en);
+ data = REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data);
}
if (PP_CAP(PHM_PlatformCaps_TCPRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL);
- data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en);
- data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en);
+ data = REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en);
+ data = REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data);
}
if (PP_CAP(PHM_PlatformCaps_DBRRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL);
- data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en);
- data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en);
+ data = REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en);
+ data = REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data);
}
}
@@ -930,20 +930,18 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
int result;
uint32_t num_se = 0, count, data;
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t reg;
num_se = adev->gfx.config.max_shader_engines;
- cgs_enter_safe_mode(hwmgr->device, true);
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
- cgs_lock_grbm_idx(hwmgr->device, true);
- reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+ mutex_lock(&adev->grbm_idx_mutex);
for (count = 0; count < num_se; count++) {
data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
- cgs_write_register(hwmgr->device, reg, data);
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT);
result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT);
@@ -958,43 +956,43 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
if (0 != result)
break;
}
- cgs_write_register(hwmgr->device, reg, 0xE0000000);
- cgs_lock_grbm_idx(hwmgr->device, false);
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
+ mutex_unlock(&adev->grbm_idx_mutex);
vega10_didt_set_mask(hwmgr, true);
- cgs_enter_safe_mode(hwmgr->device, false);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
return 0;
}
static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
{
- cgs_enter_safe_mode(hwmgr->device, true);
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
vega10_didt_set_mask(hwmgr, false);
- cgs_enter_safe_mode(hwmgr->device, false);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
return 0;
}
static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
int result;
uint32_t num_se = 0, count, data;
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t reg;
num_se = adev->gfx.config.max_shader_engines;
- cgs_enter_safe_mode(hwmgr->device, true);
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
- cgs_lock_grbm_idx(hwmgr->device, true);
- reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+ mutex_lock(&adev->grbm_idx_mutex);
for (count = 0; count < num_se; count++) {
data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
- cgs_write_register(hwmgr->device, reg, data);
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT);
result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT);
@@ -1003,12 +1001,12 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
if (0 != result)
break;
}
- cgs_write_register(hwmgr->device, reg, 0xE0000000);
- cgs_lock_grbm_idx(hwmgr->device, false);
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
+ mutex_unlock(&adev->grbm_idx_mutex);
vega10_didt_set_mask(hwmgr, true);
- cgs_enter_safe_mode(hwmgr->device, false);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
if (PP_CAP(PHM_PlatformCaps_GCEDC))
@@ -1022,13 +1020,14 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
uint32_t data;
- cgs_enter_safe_mode(hwmgr->device, true);
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
vega10_didt_set_mask(hwmgr, false);
- cgs_enter_safe_mode(hwmgr->device, false);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
data = 0x00000000;
@@ -1043,20 +1042,18 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
int result;
uint32_t num_se = 0, count, data;
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t reg;
num_se = adev->gfx.config.max_shader_engines;
- cgs_enter_safe_mode(hwmgr->device, true);
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
- cgs_lock_grbm_idx(hwmgr->device, true);
- reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+ mutex_lock(&adev->grbm_idx_mutex);
for (count = 0; count < num_se; count++) {
data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
- cgs_write_register(hwmgr->device, reg, data);
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
result = vega10_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega10, VEGA10_CONFIGREG_DIDT);
result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT);
@@ -1067,46 +1064,46 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
if (0 != result)
break;
}
- cgs_write_register(hwmgr->device, reg, 0xE0000000);
- cgs_lock_grbm_idx(hwmgr->device, false);
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
+ mutex_unlock(&adev->grbm_idx_mutex);
vega10_didt_set_mask(hwmgr, true);
- cgs_enter_safe_mode(hwmgr->device, false);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
return 0;
}
static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
{
- cgs_enter_safe_mode(hwmgr->device, true);
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
vega10_didt_set_mask(hwmgr, false);
- cgs_enter_safe_mode(hwmgr->device, false);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
return 0;
}
static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
int result;
uint32_t num_se = 0;
uint32_t count, data;
- struct amdgpu_device *adev = hwmgr->adev;
- uint32_t reg;
num_se = adev->gfx.config.max_shader_engines;
- cgs_enter_safe_mode(hwmgr->device, true);
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
- cgs_lock_grbm_idx(hwmgr->device, true);
- reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+ mutex_lock(&adev->grbm_idx_mutex);
for (count = 0; count < num_se; count++) {
data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
- cgs_write_register(hwmgr->device, reg, data);
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT);
result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT);
@@ -1115,12 +1112,12 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
if (0 != result)
break;
}
- cgs_write_register(hwmgr->device, reg, 0xE0000000);
- cgs_lock_grbm_idx(hwmgr->device, false);
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
+ mutex_unlock(&adev->grbm_idx_mutex);
vega10_didt_set_mask(hwmgr, true);
- cgs_enter_safe_mode(hwmgr->device, false);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
@@ -1137,13 +1134,14 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
uint32_t data;
- cgs_enter_safe_mode(hwmgr->device, true);
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
vega10_didt_set_mask(hwmgr, false);
- cgs_enter_safe_mode(hwmgr->device, false);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
data = 0x00000000;
@@ -1158,15 +1156,14 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
{
- uint32_t reg;
+ struct amdgpu_device *adev = hwmgr->adev;
int result;
- cgs_enter_safe_mode(hwmgr->device, true);
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
- cgs_lock_grbm_idx(hwmgr->device, true);
- reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
- cgs_write_register(hwmgr->device, reg, 0xE0000000);
- cgs_lock_grbm_idx(hwmgr->device, false);
+ mutex_lock(&adev->grbm_idx_mutex);
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
+ mutex_unlock(&adev->grbm_idx_mutex);
result = vega10_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega10, VEGA10_CONFIGREG_DIDT);
@@ -1175,7 +1172,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, false);
- cgs_enter_safe_mode(hwmgr->device, false);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
return 0;
}
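
The hunks above repeat a single conversion: the CGS safe-mode, GRBM-index and register wrappers become direct amdgpu calls. A condensed sketch of the resulting per-SE programming pattern, using only APIs that appear in this diff (the function name is hypothetical):

	static void vega10_per_se_write_example(struct pp_hwmgr *hwmgr)
	{
		struct amdgpu_device *adev = hwmgr->adev;
		uint32_t se, data;

		adev->gfx.rlc.funcs->enter_safe_mode(adev);	/* quiesce the RLC */
		mutex_lock(&adev->grbm_idx_mutex);	/* GRBM_GFX_INDEX is shared state */
		for (se = 0; se < adev->gfx.config.max_shader_engines; se++) {
			data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
			       GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			       (se << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
			WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);	/* select one SE */
			/* per-SE DiDt/EDC register writes go here */
		}
		WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);	/* restore broadcast */
		mutex_unlock(&adev->grbm_idx_mutex);
		adev->gfx.rlc.funcs->exit_safe_mode(adev);
	}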
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index c61d074..0768d25 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -52,7 +52,7 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
if (!table_address) {
table_address = (ATOM_Vega10_POWERPLAYTABLE *)
- cgs_atom_get_data_table(hwmgr->device, index,
+ smu_atom_get_data_table(hwmgr->adev, index,
&size, &frev, &crev);
hwmgr->soft_pp_table = table_address; /*Cache the result in RAM.*/
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index 9f18226..aa044c1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -25,7 +25,7 @@
#include "vega10_hwmgr.h"
#include "vega10_ppsmc.h"
#include "vega10_inc.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
#include "pp_debug.h"
static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
@@ -89,6 +89,7 @@ int vega10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
{
+ struct amdgpu_device *adev = hwmgr->adev;
struct vega10_hwmgr *data = hwmgr->backend;
uint32_t tach_period;
uint32_t crystal_clock_freq;
@@ -100,10 +101,8 @@ int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
if (data->smu_features[GNLD_FAN_CONTROL].supported) {
result = vega10_get_current_rpm(hwmgr, speed);
} else {
- uint32_t reg = soc15_get_register_offset(THM_HWID, 0,
- mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS);
tach_period =
- CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+ REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_STATUS),
CG_TACH_STATUS,
TACH_PERIOD);
@@ -127,26 +126,23 @@ int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
*/
int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
- uint32_t reg;
-
- reg = soc15_get_register_offset(THM_HWID, 0,
- mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2);
+ struct amdgpu_device *adev = hwmgr->adev;
if (hwmgr->fan_ctrl_is_in_default_mode) {
hwmgr->fan_ctrl_default_mode =
- CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+ REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
CG_FDO_CTRL2, FDO_PWM_MODE);
hwmgr->tmin =
- CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+ REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
CG_FDO_CTRL2, TMIN);
hwmgr->fan_ctrl_is_in_default_mode = false;
}
- cgs_write_register(hwmgr->device, reg,
- CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
CG_FDO_CTRL2, TMIN, 0));
- cgs_write_register(hwmgr->device, reg,
- CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
CG_FDO_CTRL2, FDO_PWM_MODE, mode));
return 0;
@@ -159,18 +155,15 @@ int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
*/
int vega10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
{
- uint32_t reg;
-
- reg = soc15_get_register_offset(THM_HWID, 0,
- mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2);
+ struct amdgpu_device *adev = hwmgr->adev;
if (!hwmgr->fan_ctrl_is_in_default_mode) {
- cgs_write_register(hwmgr->device, reg,
- CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
CG_FDO_CTRL2, FDO_PWM_MODE,
hwmgr->fan_ctrl_default_mode));
- cgs_write_register(hwmgr->device, reg,
- CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
CG_FDO_CTRL2, TMIN,
hwmgr->tmin << CG_FDO_CTRL2__TMIN__SHIFT));
hwmgr->fan_ctrl_is_in_default_mode = true;
@@ -257,10 +250,10 @@ int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
uint32_t speed)
{
+ struct amdgpu_device *adev = hwmgr->adev;
uint32_t duty100;
uint32_t duty;
uint64_t tmp64;
- uint32_t reg;
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
@@ -271,10 +264,7 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
- reg = soc15_get_register_offset(THM_HWID, 0,
- mmCG_FDO_CTRL1_BASE_IDX, mmCG_FDO_CTRL1);
-
- duty100 = CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
CG_FDO_CTRL1, FMAX_DUTY100);
if (duty100 == 0)
@@ -284,10 +274,8 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
do_div(tmp64, 100);
duty = (uint32_t)tmp64;
- reg = soc15_get_register_offset(THM_HWID, 0,
- mmCG_FDO_CTRL0_BASE_IDX, mmCG_FDO_CTRL0);
- cgs_write_register(hwmgr->device, reg,
- CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
@@ -317,10 +305,10 @@ int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
*/
int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
{
+ struct amdgpu_device *adev = hwmgr->adev;
uint32_t tach_period;
uint32_t crystal_clock_freq;
int result = 0;
- uint32_t reg;
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
(speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
@@ -333,10 +321,8 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
if (!result) {
crystal_clock_freq = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
- reg = soc15_get_register_offset(THM_HWID, 0,
- mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS);
- cgs_write_register(hwmgr->device, reg,
- CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ WREG32_SOC15(THM, 0, mmCG_TACH_STATUS,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_STATUS),
CG_TACH_STATUS, TACH_PERIOD,
tach_period));
}
@@ -350,13 +336,10 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
*/
int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
int temp;
- uint32_t reg;
- reg = soc15_get_register_offset(THM_HWID, 0,
- mmCG_MULT_THERMAL_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS);
-
- temp = cgs_read_register(hwmgr->device, reg);
+ temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
@@ -379,11 +362,12 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range)
{
+ struct amdgpu_device *adev = hwmgr->adev;
int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- uint32_t val, reg;
+ uint32_t val;
if (low < range->min)
low = range->min;
@@ -393,20 +377,17 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
if (low > high)
return -EINVAL;
- reg = soc15_get_register_offset(THM_HWID, 0,
- mmTHM_THERMAL_INT_CTRL_BASE_IDX, mmTHM_THERMAL_INT_CTRL);
-
- val = cgs_read_register(hwmgr->device, reg);
+ val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
- val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
- val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
- val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
- val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
val &= (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK) &
(~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK) &
(~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
- cgs_write_register(hwmgr->device, reg, val);
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
return 0;
}
@@ -418,21 +399,17 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
*/
static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr)
{
- uint32_t reg;
+ struct amdgpu_device *adev = hwmgr->adev;
if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
- reg = soc15_get_register_offset(THM_HWID, 0,
- mmCG_TACH_CTRL_BASE_IDX, mmCG_TACH_CTRL);
- cgs_write_register(hwmgr->device, reg,
- CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
CG_TACH_CTRL, EDGE_PER_REV,
hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1));
}
- reg = soc15_get_register_offset(THM_HWID, 0,
- mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2);
- cgs_write_register(hwmgr->device, reg,
- CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28));
return 0;
@@ -445,9 +422,9 @@ static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr)
*/
static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
struct vega10_hwmgr *data = hwmgr->backend;
uint32_t val = 0;
- uint32_t reg;
if (data->smu_features[GNLD_FW_CTF].supported) {
if (data->smu_features[GNLD_FW_CTF].enabled)
@@ -465,8 +442,7 @@ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
- reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA);
- cgs_write_register(hwmgr->device, reg, val);
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
return 0;
}
@@ -477,8 +453,8 @@ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
*/
int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
struct vega10_hwmgr *data = hwmgr->backend;
- uint32_t reg;
if (data->smu_features[GNLD_FW_CTF].supported) {
if (!data->smu_features[GNLD_FW_CTF].enabled)
@@ -493,8 +469,7 @@ int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_FW_CTF].enabled = false;
}
- reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA);
- cgs_write_register(hwmgr->device, reg, 0);
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
return 0;
}
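
The thermal conversions above all reduce to the same read-modify-write idiom: one RREG32_SOC15/REG_SET_FIELD/WREG32_SOC15 round trip replaces the soc15_get_register_offset lookup plus the CGS accessor pair. A minimal sketch (the helper name is hypothetical):

	static void vega10_set_fdo_pwm_mode_example(struct amdgpu_device *adev,
						    uint32_t mode)
	{
		uint32_t val = RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2);		/* read */

		val = REG_SET_FIELD(val, CG_FDO_CTRL2, FDO_PWM_MODE, mode);	/* modify */
		WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2, val);			/* write */
	}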
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 200de46..782e209 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -34,7 +34,6 @@
#include "atomfirmware.h"
#include "cgs_common.h"
#include "vega12_inc.h"
-#include "pp_soc15.h"
#include "pppcielanes.h"
#include "vega12_hwmgr.h"
#include "vega12_processpptables.h"
@@ -546,6 +545,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
return -EINVAL);
dpm_table->dpm_levels[i].value = clock;
+ dpm_table->dpm_levels[i].enabled = true;
}
vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -565,6 +565,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
return -EINVAL);
dpm_table->dpm_levels[i].value = clock;
+ dpm_table->dpm_levels[i].enabled = true;
}
vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -585,6 +586,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
return -EINVAL);
dpm_table->dpm_levels[i].value = clock;
+ dpm_table->dpm_levels[i].enabled = true;
}
vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -605,6 +607,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
return -EINVAL);
dpm_table->dpm_levels[i].value = clock;
+ dpm_table->dpm_levels[i].enabled = true;
}
vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -625,6 +628,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
return -EINVAL);
dpm_table->dpm_levels[i].value = clock;
+ dpm_table->dpm_levels[i].enabled = true;
}
vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -645,6 +649,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
return -EINVAL);
dpm_table->dpm_levels[i].value = clock;
+ dpm_table->dpm_levels[i].enabled = true;
}
vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -666,6 +671,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
return -EINVAL);
dpm_table->dpm_levels[i].value = clock;
+ dpm_table->dpm_levels[i].enabled = true;
}
vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -686,6 +692,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
return -EINVAL);
dpm_table->dpm_levels[i].value = clock;
+ dpm_table->dpm_levels[i].enabled = true;
}
vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -706,6 +713,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
return -EINVAL);
dpm_table->dpm_levels[i].value = clock;
+ dpm_table->dpm_levels[i].enabled = true;
}
vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -726,6 +734,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
return -EINVAL);
dpm_table->dpm_levels[i].value = clock;
+ dpm_table->dpm_levels[i].enabled = true;
}
vega12_init_dpm_state(&(dpm_table->dpm_state));
@@ -992,15 +1001,55 @@ static uint32_t vega12_find_highest_dpm_level(
static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
{
+ struct vega12_hwmgr *data = hwmgr->backend;
+ if (data->smc_state_table.gfx_boot_level !=
+ data->dpm_table.gfx_table.dpm_state.soft_min_level) {
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMinByFreq,
+ PPCLK_GFXCLK<<16 | data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_boot_level].value);
+ data->dpm_table.gfx_table.dpm_state.soft_min_level =
+ data->smc_state_table.gfx_boot_level;
+ }
+
+ if (data->smc_state_table.mem_boot_level !=
+ data->dpm_table.mem_table.dpm_state.soft_min_level) {
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMinByFreq,
+ PPCLK_UCLK<<16 | data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_boot_level].value);
+ data->dpm_table.mem_table.dpm_state.soft_min_level =
+ data->smc_state_table.mem_boot_level;
+ }
+
return 0;
+
}
static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
{
+ struct vega12_hwmgr *data = hwmgr->backend;
+ if (data->smc_state_table.gfx_max_level !=
+ data->dpm_table.gfx_table.dpm_state.soft_max_level) {
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxByFreq,
+ /* add 1 to the value to align the resolution */
+ PPCLK_GFXCLK<<16 | (data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_max_level].value + 1));
+ data->dpm_table.gfx_table.dpm_state.soft_max_level =
+ data->smc_state_table.gfx_max_level;
+ }
+
+ if (data->smc_state_table.mem_max_level !=
+ data->dpm_table.mem_table.dpm_state.soft_max_level) {
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxByFreq,
+ /* add 1 to the value to align the resolution */
+ PPCLK_UCLK<<16 | (data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_max_level].value + 1));
+ data->dpm_table.mem_table.dpm_state.soft_max_level =
+ data->smc_state_table.mem_max_level;
+ }
+
return 0;
}
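
Both upload helpers encode the SMC argument identically: the PPCLK_* clock id in the upper 16 bits and the target frequency in the lower 16 (the max path adds 1 to compensate the SMC's resolution, per the comment above). Roughly, with a hypothetical freq assumed to fit in 16 bits:

	uint32_t arg = (PPCLK_GFXCLK << 16) | freq;	/* clock id | frequency, in the SMC's units */
	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinByFreq, arg);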
-
int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
struct vega12_hwmgr *data =
@@ -1064,8 +1113,7 @@ static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
return (mem_clk * 100);
}
-static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr,
- struct pp_gpu_power *query)
+static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
{
#if 0
uint32_t value;
@@ -1077,7 +1125,7 @@ static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr,
vega12_read_arg_from_smc(hwmgr, &value);
/* power value is an integer */
- query->average_gpu_power = value << 8;
+ *query = value << 8;
#endif
return 0;
}
@@ -1186,12 +1234,8 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_POWER:
- if (*size < sizeof(struct pp_gpu_power))
- ret = -EINVAL;
- else {
- *size = sizeof(struct pp_gpu_power);
- ret = vega12_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
- }
+ ret = vega12_get_gpu_power(hwmgr, (uint32_t *)value);
+
break;
default:
ret = -EINVAL;
@@ -1260,23 +1304,18 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
- uint32_t num_active_disps = 0;
- struct cgs_display_info info = {0};
struct PP_Clocks min_clocks = {0};
struct pp_display_clock_request clock_req;
uint32_t clk_request;
- info.mode_info = NULL;
- cgs_get_active_displays_info(hwmgr->device, &info);
- num_active_disps = info.display_count;
- if (num_active_disps > 1)
+ if (hwmgr->display_config->num_display > 1)
vega12_notify_smc_display_change(hwmgr, false);
else
vega12_notify_smc_display_change(hwmgr, true);
- min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
- min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;
- min_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
+ min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
+ min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
+ min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
clock_req.clock_type = amd_pp_dcef_clock;
@@ -1832,9 +1871,7 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
int result = 0;
- uint32_t num_turned_on_displays = 1;
Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
- struct cgs_display_info info = {0};
if ((data->water_marks_bitmap & WaterMarksExist) &&
!(data->water_marks_bitmap & WaterMarksLoaded)) {
@@ -1846,12 +1883,9 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
if ((data->water_marks_bitmap & WaterMarksExist) &&
data->smu_features[GNLD_DPM_DCEFCLK].supported &&
- data->smu_features[GNLD_DPM_SOCCLK].supported) {
- cgs_get_active_displays_info(hwmgr->device, &info);
- num_turned_on_displays = info.display_count;
+ data->smu_features[GNLD_DPM_SOCCLK].supported)
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
- }
+ PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
return result;
}
@@ -1894,15 +1928,12 @@ vega12_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmg
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
bool is_update_required = false;
- struct cgs_display_info info = {0, 0, NULL};
-
- cgs_get_active_displays_info(hwmgr->device, &info);
- if (data->display_timing.num_existing_displays != info.display_count)
+ if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
is_update_required = true;
if (data->registry_data.gfx_clk_deep_sleep_support) {
- if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
+ if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
is_update_required = true;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
index bc98b1d..e81ded1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
@@ -33,7 +33,7 @@
#define WaterMarksExist 1
#define WaterMarksLoaded 2
-#define VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS 8
+#define VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS 16
#define VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS 8
#define VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS 8
#define VG12_PSUEDO_NUM_UCLK_DPM_LEVELS 4
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index b34113f..888ddca 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -51,7 +51,7 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
if (!table_address) {
table_address = (ATOM_Vega12_POWERPLAYTABLE *)
- cgs_atom_get_data_table(hwmgr->device, index,
+ smu_atom_get_data_table(hwmgr->adev, index,
&size, &frev, &crev);
hwmgr->soft_pp_table = table_address; /*Cache the result in RAM.*/
@@ -224,6 +224,11 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent;
ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq;
+ /* 0xFFFF will disable the ACG feature */
+ if (!(hwmgr->feature_mask & PP_ACG_MASK)) {
+ ppsmc_pptable->AcgThresholdFreqHigh = 0xFFFF;
+ ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
index df0fa81..cfd9e6c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
@@ -26,7 +26,7 @@
#include "vega12_smumgr.h"
#include "vega12_ppsmc.h"
#include "vega12_inc.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
#include "pp_debug.h"
static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
@@ -147,13 +147,10 @@ int vega12_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
*/
int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
int temp = 0;
- uint32_t reg;
- reg = soc15_get_register_offset(THM_HWID, 0,
- mmCG_MULT_THERMAL_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS);
-
- temp = cgs_read_register(hwmgr->device, reg);
+ temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
@@ -175,11 +172,12 @@ int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr)
static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range)
{
+ struct amdgpu_device *adev = hwmgr->adev;
int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- uint32_t val, reg;
+ uint32_t val;
if (low < range->min)
low = range->min;
@@ -189,18 +187,15 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
if (low > high)
return -EINVAL;
- reg = soc15_get_register_offset(THM_HWID, 0,
- mmTHM_THERMAL_INT_CTRL_BASE_IDX, mmTHM_THERMAL_INT_CTRL);
-
- val = cgs_read_register(hwmgr->device, reg);
+ val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
- val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
- val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
- val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
- val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
- cgs_write_register(hwmgr->device, reg, val);
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
return 0;
}
@@ -212,15 +207,14 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
*/
static int vega12_thermal_enable_alert(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
uint32_t val = 0;
- uint32_t reg;
val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
- reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA);
- cgs_write_register(hwmgr->device, reg, val);
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
return 0;
}
@@ -231,10 +225,9 @@ static int vega12_thermal_enable_alert(struct pp_hwmgr *hwmgr)
*/
int vega12_thermal_disable_alert(struct pp_hwmgr *hwmgr)
{
- uint32_t reg;
+ struct amdgpu_device *adev = hwmgr->adev;
- reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA);
- cgs_write_register(hwmgr->device, reg, 0);
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 8b78bbe..9bb8785 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -377,11 +377,7 @@ struct phm_clocks {
#define DPMTABLE_UPDATE_SCLK 0x00000004
#define DPMTABLE_UPDATE_MCLK 0x00000008
#define DPMTABLE_OD_UPDATE_VDDC 0x00000010
-
-/* To determine if sclk and mclk are in overdrive state */
-#define SCLK_OVERDRIVE_ENABLED 0x00000001
-#define MCLK_OVERDRIVE_ENABLED 0x00000002
-#define VDDC_OVERDRIVE_ENABLED 0x00000010
+#define DPMTABLE_UPDATE_SOCCLK 0x00000020
struct phm_odn_performance_level {
uint32_t clock;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 17f811d..3c321c7 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -38,6 +38,8 @@ struct phm_fan_speed_info;
struct pp_atomctrl_voltage_table;
#define VOLTAGE_SCALE 4
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
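
A hedged note on the two new scales: they presumably implement the usual SVI2 VID decode in 6.25 mV steps, i.e.

	/* assumed decode, not shown in this diff:
	 * mV = 1550 - vid * VOLTAGE_VID_OFFSET_SCALE1 / VOLTAGE_VID_OFFSET_SCALE2
	 *    = 1550 - vid * 625 / 100  (6.25 mV per VID step down from 1.55 V)
	 */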
enum DISPLAY_GAP {
DISPLAY_GAP_VBLANK_OR_WM = 0, /* Wait for vblank or MCHG watermark. */
@@ -64,24 +66,6 @@ struct vi_dpm_table {
#define PCIE_PERF_REQ_GEN2 3
#define PCIE_PERF_REQ_GEN3 4
-enum PP_FEATURE_MASK {
- PP_SCLK_DPM_MASK = 0x1,
- PP_MCLK_DPM_MASK = 0x2,
- PP_PCIE_DPM_MASK = 0x4,
- PP_SCLK_DEEP_SLEEP_MASK = 0x8,
- PP_POWER_CONTAINMENT_MASK = 0x10,
- PP_UVD_HANDSHAKE_MASK = 0x20,
- PP_SMC_VOLTAGE_CONTROL_MASK = 0x40,
- PP_VBI_TIME_SUPPORT_MASK = 0x80,
- PP_ULV_MASK = 0x100,
- PP_ENABLE_GFX_CG_THRU_SMU = 0x200,
- PP_CLOCK_STRETCH_MASK = 0x400,
- PP_OD_FUZZY_FAN_CONTROL_MASK = 0x800,
- PP_SOCCLK_DPM_MASK = 0x1000,
- PP_DCEFCLK_DPM_MASK = 0x2000,
- PP_OVERDRIVE_MASK = 0x4000,
-};
-
enum PHM_BackEnd_Magic {
PHM_Dummy_Magic = 0xAA5555AA,
PHM_RV770_Magic = 0xDCBAABCD,
@@ -312,6 +296,7 @@ struct pp_hwmgr_func {
int (*display_clock_voltage_request)(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock);
int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
+ int (*gfx_off_control)(struct pp_hwmgr *hwmgr, bool enable);
int (*power_off_asic)(struct pp_hwmgr *hwmgr);
int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask);
int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
@@ -341,6 +326,7 @@ struct pp_hwmgr_func {
long *input, uint32_t size);
int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n);
int (*set_mmhub_powergating_by_smu)(struct pp_hwmgr *hwmgr);
+ int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
};
struct pp_table_func {
@@ -718,6 +704,7 @@ struct pp_hwmgr {
uint32_t chip_family;
uint32_t chip_id;
uint32_t smu_version;
+ bool not_vf;
bool pm_en;
struct mutex smu_lock;
@@ -764,7 +751,7 @@ struct pp_hwmgr {
struct pp_power_state *request_ps;
struct pp_power_state *boot_ps;
struct pp_power_state *uvd_ps;
- struct amd_pp_display_configuration display_config;
+ const struct amd_pp_display_configuration *display_config;
uint32_t feature_mask;
bool avfs_supported;
/* UMD Pstate */
@@ -782,10 +769,13 @@ struct pp_hwmgr {
};
int hwmgr_early_init(struct pp_hwmgr *hwmgr);
+int hwmgr_sw_init(struct pp_hwmgr *hwmgr);
+int hwmgr_sw_fini(struct pp_hwmgr *hwmgr);
int hwmgr_hw_init(struct pp_hwmgr *hwmgr);
int hwmgr_hw_fini(struct pp_hwmgr *hwmgr);
-int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr);
-int hwmgr_hw_resume(struct pp_hwmgr *hwmgr);
+int hwmgr_suspend(struct pp_hwmgr *hwmgr);
+int hwmgr_resume(struct pp_hwmgr *hwmgr);
+
int hwmgr_handle_task(struct pp_hwmgr *hwmgr,
enum amd_pp_task task_id,
enum amd_pm_state_type *user_state);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
index 426bff2..a2991fa 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
@@ -75,13 +75,15 @@
#define PPSMC_MSG_GetMinGfxclkFrequency 0x2C
#define PPSMC_MSG_GetMaxGfxclkFrequency 0x2D
#define PPSMC_MSG_SoftReset 0x2E
+#define PPSMC_MSG_SetGfxCGPG 0x2F
#define PPSMC_MSG_SetSoftMaxGfxClk 0x30
#define PPSMC_MSG_SetHardMinGfxClk 0x31
#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x32
#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x33
#define PPSMC_MSG_SetSoftMaxVcn 0x34
#define PPSMC_MSG_PowerGateMmHub 0x35
-#define PPSMC_Message_Count 0x36
+#define PPSMC_MSG_SetRccPfcPmeRestoreRegister 0x36
+#define PPSMC_Message_Count 0x37
typedef uint16_t PPSMC_Result;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu75.h b/drivers/gpu/drm/amd/powerplay/inc/smu75.h
new file mode 100644
index 0000000..7715230
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu75.h
@@ -0,0 +1,760 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef SMU75_H
+#define SMU75_H
+
+#pragma pack(push, 1)
+
+typedef struct {
+ uint32_t high;
+ uint32_t low;
+} data_64_t;
+
+typedef struct {
+ data_64_t high;
+ data_64_t low;
+} data_128_t;
+
+#define SMU__DGPU_ONLY
+
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 4
+#define SMU__NUM_LCLK_DPM_LEVELS 8
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+
+#define SMU7_CONTEXT_ID_SMC 1
+#define SMU7_CONTEXT_ID_VBIOS 2
+
+#define SMU75_MAX_LEVELS_VDDC 16
+#define SMU75_MAX_LEVELS_VDDGFX 16
+#define SMU75_MAX_LEVELS_VDDCI 8
+#define SMU75_MAX_LEVELS_MVDD 4
+
+#define SMU_MAX_SMIO_LEVELS 4
+
+#define SMU75_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE
+#define SMU75_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS
+#define SMU75_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS
+#define SMU75_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS
+#define SMU75_MAX_LEVELS_UVD 8
+#define SMU75_MAX_LEVELS_VCE 8
+#define SMU75_MAX_LEVELS_ACP 8
+#define SMU75_MAX_LEVELS_SAMU 8
+#define SMU75_MAX_ENTRIES_SMIO 32
+
+#define DPM_NO_LIMIT 0
+#define DPM_NO_UP 1
+#define DPM_GO_DOWN 2
+#define DPM_GO_UP 3
+
+#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0
+#define SMU7_FIRST_DPM_MEMORY_LEVEL 0
+
+#define GPIO_CLAMP_MODE_VRHOT 1
+#define GPIO_CLAMP_MODE_THERM 2
+#define GPIO_CLAMP_MODE_DC 4
+
+#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
+#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
+#define SCRATCH_B_CURR_PCIE_INDEX_MASK (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_UVD_INDEX_SHIFT 6
+#define SCRATCH_B_TARG_UVD_INDEX_MASK (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
+#define SCRATCH_B_CURR_UVD_INDEX_SHIFT 9
+#define SCRATCH_B_CURR_UVD_INDEX_MASK (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
+#define SCRATCH_B_TARG_VCE_INDEX_SHIFT 12
+#define SCRATCH_B_TARG_VCE_INDEX_MASK (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_VCE_INDEX_SHIFT 15
+#define SCRATCH_B_CURR_VCE_INDEX_MASK (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_ACP_INDEX_SHIFT 18
+#define SCRATCH_B_TARG_ACP_INDEX_MASK (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
+#define SCRATCH_B_CURR_ACP_INDEX_SHIFT 21
+#define SCRATCH_B_CURR_ACP_INDEX_MASK (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
+#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
+#define SCRATCH_B_TARG_SAMU_INDEX_MASK (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
+#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
+#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
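
Each SCRATCH_B field above is a 3-bit index at a fixed shift, so extracting one is the standard mask-and-shift, e.g. (scratch_b being a hypothetical register snapshot):

	uint32_t targ_pcie = (scratch_b & SCRATCH_B_TARG_PCIE_INDEX_MASK) >>
			     SCRATCH_B_TARG_PCIE_INDEX_SHIFT;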
+
+/* Virtualization Defines */
+#define CG_XDMA_MASK 0x1
+#define CG_XDMA_SHIFT 0
+#define CG_UVD_MASK 0x2
+#define CG_UVD_SHIFT 1
+#define CG_VCE_MASK 0x4
+#define CG_VCE_SHIFT 2
+#define CG_SAMU_MASK 0x8
+#define CG_SAMU_SHIFT 3
+#define CG_GFX_MASK 0x10
+#define CG_GFX_SHIFT 4
+#define CG_SDMA_MASK 0x20
+#define CG_SDMA_SHIFT 5
+#define CG_HDP_MASK 0x40
+#define CG_HDP_SHIFT 6
+#define CG_MC_MASK 0x80
+#define CG_MC_SHIFT 7
+#define CG_DRM_MASK 0x100
+#define CG_DRM_SHIFT 8
+#define CG_ROM_MASK 0x200
+#define CG_ROM_SHIFT 9
+#define CG_BIF_MASK 0x400
+#define CG_BIF_SHIFT 10
+
+#if defined SMU__DGPU_ONLY
+#define SMU75_DTE_ITERATIONS 5
+#define SMU75_DTE_SOURCES 3
+#define SMU75_DTE_SINKS 1
+#define SMU75_NUM_CPU_TES 0
+#define SMU75_NUM_GPU_TES 1
+#define SMU75_NUM_NON_TES 2
+#define SMU75_DTE_FAN_SCALAR_MIN 0x100
+#define SMU75_DTE_FAN_SCALAR_MAX 0x166
+#define SMU75_DTE_FAN_TEMP_MAX 93
+#define SMU75_DTE_FAN_TEMP_MIN 83
+#endif
+#define SMU75_THERMAL_INPUT_LOOP_COUNT 2
+#define SMU75_THERMAL_CLAMP_MODE_COUNT 2
+
+#define EXP_M1_1 93
+#define EXP_M2_1 195759
+#define EXP_B_1 111176531
+
+#define EXP_M1_2 67
+#define EXP_M2_2 153720
+#define EXP_B_2 94415767
+
+#define EXP_M1_3 48
+#define EXP_M2_3 119796
+#define EXP_B_3 79195279
+
+#define EXP_M1_4 550
+#define EXP_M2_4 1484190
+#define EXP_B_4 1051432828
+
+#define EXP_M1_5 394
+#define EXP_M2_5 1143049
+#define EXP_B_5 864288432
+
+struct SMU7_HystController_Data {
+ uint16_t waterfall_up;
+ uint16_t waterfall_down;
+ uint16_t waterfall_limit;
+ uint16_t release_cnt;
+ uint16_t release_limit;
+ uint16_t spare;
+};
+
+typedef struct SMU7_HystController_Data SMU7_HystController_Data;
+
+struct SMU75_PIDController {
+ uint32_t Ki;
+ int32_t LFWindupUpperLim;
+ int32_t LFWindupLowerLim;
+ uint32_t StatePrecision;
+ uint32_t LfPrecision;
+ uint32_t LfOffset;
+ uint32_t MaxState;
+ uint32_t MaxLfFraction;
+ uint32_t StateShift;
+};
+
+typedef struct SMU75_PIDController SMU75_PIDController;
+
+struct SMU7_LocalDpmScoreboard {
+ uint32_t PercentageBusy;
+
+ int32_t PIDError;
+ int32_t PIDIntegral;
+ int32_t PIDOutput;
+
+ uint32_t SigmaDeltaAccum;
+ uint32_t SigmaDeltaOutput;
+ uint32_t SigmaDeltaLevel;
+
+ uint32_t UtilizationSetpoint;
+
+ uint8_t TdpClampMode;
+ uint8_t TdcClampMode;
+ uint8_t ThermClampMode;
+ uint8_t VoltageBusy;
+
+ int8_t CurrLevel;
+ int8_t TargLevel;
+ uint8_t LevelChangeInProgress;
+ uint8_t UpHyst;
+
+ uint8_t DownHyst;
+ uint8_t VoltageDownHyst;
+ uint8_t DpmEnable;
+ uint8_t DpmRunning;
+
+ uint8_t DpmForce;
+ uint8_t DpmForceLevel;
+ uint8_t DisplayWatermark;
+ uint8_t McArbIndex;
+
+ uint32_t MinimumPerfSclk;
+
+ uint8_t AcpiReq;
+ uint8_t AcpiAck;
+ uint8_t GfxClkSlow;
+ uint8_t GpioClampMode;
+
+ uint8_t EnableModeSwitchRLCNotification;
+ uint8_t EnabledLevelsChange;
+ uint8_t DteClampMode;
+ uint8_t FpsClampMode;
+
+ uint16_t LevelResidencyCounters [SMU75_MAX_LEVELS_GRAPHICS];
+ uint16_t LevelSwitchCounters [SMU75_MAX_LEVELS_GRAPHICS];
+
+ void (*TargetStateCalculator)(uint8_t);
+ void (*SavedTargetStateCalculator)(uint8_t);
+
+ uint16_t AutoDpmInterval;
+ uint16_t AutoDpmRange;
+
+ uint8_t FpsEnabled;
+ uint8_t MaxPerfLevel;
+ uint8_t AllowLowClkInterruptToHost;
+ uint8_t FpsRunning;
+
+ uint32_t MaxAllowedFrequency;
+
+ uint32_t FilteredSclkFrequency;
+ uint32_t LastSclkFrequency;
+ uint32_t FilteredSclkFrequencyCnt;
+
+ uint8_t MinPerfLevel;
+#ifdef SMU__FIRMWARE_SCKS_PRESENT__1
+ uint8_t ScksClampMode;
+ uint8_t padding[2];
+#else
+ uint8_t padding[3];
+#endif
+
+ uint16_t FpsAlpha;
+ uint16_t DeltaTime;
+ uint32_t CurrentFps;
+ uint32_t FilteredFps;
+ uint32_t FrameCount;
+ uint32_t FrameCountLast;
+ uint16_t FpsTargetScalar;
+ uint16_t FpsWaterfallLimitScalar;
+ uint16_t FpsAlphaScalar;
+ uint16_t spare8;
+ SMU7_HystController_Data HystControllerData;
+};
+
+typedef struct SMU7_LocalDpmScoreboard SMU7_LocalDpmScoreboard;
+
+#define SMU7_MAX_VOLTAGE_CLIENTS 12
+
+typedef uint8_t (*VoltageChangeHandler_t)(uint16_t, uint8_t);
+
+#define VDDC_MASK 0x00007FFF
+#define VDDC_SHIFT 0
+#define VDDCI_MASK 0x3FFF8000
+#define VDDCI_SHIFT 15
+#define PHASES_MASK 0xC0000000
+#define PHASES_SHIFT 30
+
+typedef uint32_t SMU_VoltageLevel;
+
+struct SMU7_VoltageScoreboard {
+ SMU_VoltageLevel TargetVoltage;
+ uint16_t MaxVid;
+ uint8_t HighestVidOffset;
+ uint8_t CurrentVidOffset;
+
+ uint16_t CurrentVddc;
+ uint16_t CurrentVddci;
+
+ uint8_t ControllerBusy;
+ uint8_t CurrentVid;
+ uint8_t CurrentVddciVid;
+ uint8_t padding;
+
+ SMU_VoltageLevel RequestedVoltage[SMU7_MAX_VOLTAGE_CLIENTS];
+ SMU_VoltageLevel TargetVoltageState;
+ uint8_t EnabledRequest[SMU7_MAX_VOLTAGE_CLIENTS];
+
+ uint8_t padding2;
+ uint8_t padding3;
+ uint8_t ControllerEnable;
+ uint8_t ControllerRunning;
+ uint16_t CurrentStdVoltageHiSidd;
+ uint16_t CurrentStdVoltageLoSidd;
+ uint8_t OverrideVoltage;
+ uint8_t padding4;
+ uint8_t padding5;
+ uint8_t CurrentPhases;
+
+ VoltageChangeHandler_t ChangeVddc;
+ VoltageChangeHandler_t ChangeVddci;
+ VoltageChangeHandler_t ChangePhase;
+ VoltageChangeHandler_t ChangeMvdd;
+
+ VoltageChangeHandler_t functionLinks[6];
+
+ uint16_t * VddcFollower1;
+ int16_t Driver_OD_RequestedVidOffset1;
+ int16_t Driver_OD_RequestedVidOffset2;
+};
+
+typedef struct SMU7_VoltageScoreboard SMU7_VoltageScoreboard;
+
+#define SMU7_MAX_PCIE_LINK_SPEEDS 3
+
+struct SMU7_PCIeLinkSpeedScoreboard {
+ uint8_t DpmEnable;
+ uint8_t DpmRunning;
+ uint8_t DpmForce;
+ uint8_t DpmForceLevel;
+
+ uint8_t CurrentLinkSpeed;
+ uint8_t EnabledLevelsChange;
+ uint16_t AutoDpmInterval;
+
+ uint16_t AutoDpmRange;
+ uint16_t AutoDpmCount;
+
+ uint8_t DpmMode;
+ uint8_t AcpiReq;
+ uint8_t AcpiAck;
+ uint8_t CurrentLinkLevel;
+};
+
+typedef struct SMU7_PCIeLinkSpeedScoreboard SMU7_PCIeLinkSpeedScoreboard;
+
+#define SMU7_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
+#define SMU7_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
+
+#define SMU7_SCALE_I 7
+#define SMU7_SCALE_R 12
+
+struct SMU7_PowerScoreboard {
+ uint32_t GpuPower;
+
+ uint32_t VddcPower;
+ uint32_t VddcVoltage;
+ uint32_t VddcCurrent;
+
+ uint32_t VddciPower;
+ uint32_t VddciVoltage;
+ uint32_t VddciCurrent;
+
+ uint32_t RocPower;
+
+ uint16_t Telemetry_1_slope;
+ uint16_t Telemetry_2_slope;
+ int32_t Telemetry_1_offset;
+ int32_t Telemetry_2_offset;
+
+ uint8_t MCLK_patch_flag;
+ uint8_t reserved[3];
+};
+
+typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard;
+
+#define SMU7_SCLK_DPM_CONFIG_MASK 0x01
+#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK 0x02
+#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK 0x04
+#define SMU7_MCLK_DPM_CONFIG_MASK 0x08
+#define SMU7_UVD_DPM_CONFIG_MASK 0x10
+#define SMU7_VCE_DPM_CONFIG_MASK 0x20
+#define SMU7_ACP_DPM_CONFIG_MASK 0x40
+#define SMU7_SAMU_DPM_CONFIG_MASK 0x80
+#define SMU7_PCIEGEN_DPM_CONFIG_MASK 0x100
+
+#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE 0x00000001
+#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE 0x00000002
+#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE 0x00000100
+#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE 0x00000200
+#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000
+#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000
+
+struct SMU75_SoftRegisters {
+ uint32_t RefClockFrequency;
+ uint32_t PmTimerPeriod;
+ uint32_t FeatureEnables;
+#if defined (SMU__DGPU_ONLY)
+ uint32_t PreVBlankGap;
+ uint32_t VBlankTimeout;
+ uint32_t TrainTimeGap;
+ uint32_t MvddSwitchTime;
+ uint32_t LongestAcpiTrainTime;
+ uint32_t AcpiDelay;
+ uint32_t G5TrainTime;
+ uint32_t DelayMpllPwron;
+ uint32_t VoltageChangeTimeout;
+#endif
+ uint32_t HandshakeDisables;
+
+ uint8_t DisplayPhy1Config;
+ uint8_t DisplayPhy2Config;
+ uint8_t DisplayPhy3Config;
+ uint8_t DisplayPhy4Config;
+
+ uint8_t DisplayPhy5Config;
+ uint8_t DisplayPhy6Config;
+ uint8_t DisplayPhy7Config;
+ uint8_t DisplayPhy8Config;
+
+ uint32_t AverageGraphicsActivity;
+ uint32_t AverageMemoryActivity;
+ uint32_t AverageGioActivity;
+
+ uint8_t SClkDpmEnabledLevels;
+ uint8_t MClkDpmEnabledLevels;
+ uint8_t LClkDpmEnabledLevels;
+ uint8_t PCIeDpmEnabledLevels;
+
+ uint8_t UVDDpmEnabledLevels;
+ uint8_t SAMUDpmEnabledLevels;
+ uint8_t ACPDpmEnabledLevels;
+ uint8_t VCEDpmEnabledLevels;
+
+ uint32_t DRAM_LOG_ADDR_H;
+ uint32_t DRAM_LOG_ADDR_L;
+ uint32_t DRAM_LOG_PHY_ADDR_H;
+ uint32_t DRAM_LOG_PHY_ADDR_L;
+ uint32_t DRAM_LOG_BUFF_SIZE;
+ uint32_t UlvEnterCount;
+ uint32_t UlvTime;
+ uint32_t UcodeLoadStatus;
+ uint32_t AllowMvddSwitch;
+ uint8_t Activity_Weight;
+ uint8_t Reserved8[3];
+};
+
+typedef struct SMU75_SoftRegisters SMU75_SoftRegisters;
+
+struct SMU75_Firmware_Header {
+ uint32_t Digest[5];
+ uint32_t Version;
+ uint32_t HeaderSize;
+ uint32_t Flags;
+ uint32_t EntryPoint;
+ uint32_t CodeSize;
+ uint32_t ImageSize;
+
+ uint32_t Rtos;
+ uint32_t SoftRegisters;
+ uint32_t DpmTable;
+ uint32_t FanTable;
+ uint32_t CacConfigTable;
+ uint32_t CacStatusTable;
+ uint32_t mcRegisterTable;
+ uint32_t mcArbDramTimingTable;
+ uint32_t PmFuseTable;
+ uint32_t Globals;
+ uint32_t ClockStretcherTable;
+ uint32_t VftTable;
+ uint32_t Reserved1;
+ uint32_t AvfsCksOff_AvfsGbvTable;
+ uint32_t AvfsCksOff_BtcGbvTable;
+ uint32_t MM_AvfsTable;
+ uint32_t PowerSharingTable;
+ uint32_t AvfsTable;
+ uint32_t AvfsCksOffGbvTable;
+ uint32_t AvfsMeanNSigma;
+ uint32_t AvfsSclkOffsetTable;
+ uint32_t Reserved[12];
+ uint32_t Signature;
+};
+
+typedef struct SMU75_Firmware_Header SMU75_Firmware_Header;
+
+#define SMU7_FIRMWARE_HEADER_LOCATION 0x20000
+
+enum DisplayConfig {
+ PowerDown = 1,
+ DP54x4,
+ DP54x2,
+ DP54x1,
+ DP27x4,
+ DP27x2,
+ DP27x1,
+ HDMI297,
+ HDMI162,
+ LVDS,
+ DP324x4,
+ DP324x2,
+ DP324x1
+};
+
+#define MC_BLOCK_COUNT 1
+#define CPL_BLOCK_COUNT 5
+#define SE_BLOCK_COUNT 15
+#define GC_BLOCK_COUNT 24
+
+struct SMU7_Local_Cac {
+ uint8_t BlockId;
+ uint8_t SignalId;
+ uint8_t Threshold;
+ uint8_t Padding;
+};
+
+typedef struct SMU7_Local_Cac SMU7_Local_Cac;
+
+struct SMU7_Local_Cac_Table {
+ SMU7_Local_Cac CplLocalCac[CPL_BLOCK_COUNT];
+ SMU7_Local_Cac McLocalCac[MC_BLOCK_COUNT];
+ SMU7_Local_Cac SeLocalCac[SE_BLOCK_COUNT];
+ SMU7_Local_Cac GcLocalCac[GC_BLOCK_COUNT];
+};
+
+typedef struct SMU7_Local_Cac_Table SMU7_Local_Cac_Table;
+
+#pragma pack(pop)
+
+#define CG_SYS_BITMASK_FIRST_BIT 0
+#define CG_SYS_BITMASK_LAST_BIT 10
+#define CG_SYS_BIF_MGLS_SHIFT 0
+#define CG_SYS_ROM_SHIFT 1
+#define CG_SYS_MC_MGCG_SHIFT 2
+#define CG_SYS_MC_MGLS_SHIFT 3
+#define CG_SYS_SDMA_MGCG_SHIFT 4
+#define CG_SYS_SDMA_MGLS_SHIFT 5
+#define CG_SYS_DRM_MGCG_SHIFT 6
+#define CG_SYS_HDP_MGCG_SHIFT 7
+#define CG_SYS_HDP_MGLS_SHIFT 8
+#define CG_SYS_DRM_MGLS_SHIFT 9
+#define CG_SYS_BIF_MGCG_SHIFT 10
+
+#define CG_SYS_BIF_MGLS_MASK 0x1
+#define CG_SYS_ROM_MASK 0x2
+#define CG_SYS_MC_MGCG_MASK 0x4
+#define CG_SYS_MC_MGLS_MASK 0x8
+#define CG_SYS_SDMA_MGCG_MASK 0x10
+#define CG_SYS_SDMA_MGLS_MASK 0x20
+#define CG_SYS_DRM_MGCG_MASK 0x40
+#define CG_SYS_HDP_MGCG_MASK 0x80
+#define CG_SYS_HDP_MGLS_MASK 0x100
+#define CG_SYS_DRM_MGLS_MASK 0x200
+#define CG_SYS_BIF_MGCG_MASK 0x400
+
+#define CG_GFX_BITMASK_FIRST_BIT 16
+#define CG_GFX_BITMASK_LAST_BIT 24
+
+#define CG_GFX_CGCG_SHIFT 16
+#define CG_GFX_CGLS_SHIFT 17
+#define CG_CPF_MGCG_SHIFT 18
+#define CG_RLC_MGCG_SHIFT 19
+#define CG_GFX_OTHERS_MGCG_SHIFT 20
+#define CG_GFX_3DCG_SHIFT 21
+#define CG_GFX_3DLS_SHIFT 22
+#define CG_GFX_RLC_LS_SHIFT 23
+#define CG_GFX_CP_LS_SHIFT 24
+
+#define CG_GFX_CGCG_MASK 0x00010000
+#define CG_GFX_CGLS_MASK 0x00020000
+#define CG_CPF_MGCG_MASK 0x00040000
+#define CG_RLC_MGCG_MASK 0x00080000
+#define CG_GFX_OTHERS_MGCG_MASK 0x00100000
+#define CG_GFX_3DCG_MASK 0x00200000
+#define CG_GFX_3DLS_MASK 0x00400000
+#define CG_GFX_RLC_LS_MASK 0x00800000
+#define CG_GFX_CP_LS_MASK 0x01000000
+
+
+#define VRCONF_VDDC_MASK 0x000000FF
+#define VRCONF_VDDC_SHIFT 0
+#define VRCONF_VDDGFX_MASK 0x0000FF00
+#define VRCONF_VDDGFX_SHIFT 8
+#define VRCONF_VDDCI_MASK 0x00FF0000
+#define VRCONF_VDDCI_SHIFT 16
+#define VRCONF_MVDD_MASK 0xFF000000
+#define VRCONF_MVDD_SHIFT 24
+
+#define VR_MERGED_WITH_VDDC 0
+#define VR_SVI2_PLANE_1 1
+#define VR_SVI2_PLANE_2 2
+#define VR_SMIO_PATTERN_1 3
+#define VR_SMIO_PATTERN_2 4
+#define VR_STATIC_VOLTAGE 5
+
+#define CLOCK_STRETCHER_MAX_ENTRIES 0x4
+#define CKS_LOOKUPTable_MAX_ENTRIES 0x4
+
+#define CLOCK_STRETCHER_SETTING_DDT_MASK 0x01
+#define CLOCK_STRETCHER_SETTING_DDT_SHIFT 0x0
+#define CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_MASK 0x1E
+#define CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_SHIFT 0x1
+#define CLOCK_STRETCHER_SETTING_ENABLE_MASK 0x80
+#define CLOCK_STRETCHER_SETTING_ENABLE_SHIFT 0x7
+
+struct SMU_ClockStretcherDataTableEntry {
+ uint8_t minVID;
+ uint8_t maxVID;
+
+ uint16_t setting;
+};
+typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry;
+
+struct SMU_ClockStretcherDataTable {
+ SMU_ClockStretcherDataTableEntry ClockStretcherDataTableEntry[CLOCK_STRETCHER_MAX_ENTRIES];
+};
+typedef struct SMU_ClockStretcherDataTable SMU_ClockStretcherDataTable;
+
+struct SMU_CKS_LOOKUPTableEntry {
+ uint16_t minFreq;
+ uint16_t maxFreq;
+
+ uint8_t setting;
+ uint8_t padding[3];
+};
+typedef struct SMU_CKS_LOOKUPTableEntry SMU_CKS_LOOKUPTableEntry;
+
+struct SMU_CKS_LOOKUPTable {
+ SMU_CKS_LOOKUPTableEntry CKS_LOOKUPTableEntry[CKS_LOOKUPTable_MAX_ENTRIES];
+};
+typedef struct SMU_CKS_LOOKUPTable SMU_CKS_LOOKUPTable;
+
+struct AgmAvfsData_t {
+ uint16_t avgPsmCount[28];
+ uint16_t minPsmCount[28];
+};
+typedef struct AgmAvfsData_t AgmAvfsData_t;
+
+enum VFT_COLUMNS {
+ SCLK0,
+ SCLK1,
+ SCLK2,
+ SCLK3,
+ SCLK4,
+ SCLK5,
+ SCLK6,
+ SCLK7,
+
+ NUM_VFT_COLUMNS
+};
+enum {
+ SCS_FUSE_T0,
+ SCS_FUSE_T1,
+ NUM_SCS_FUSE_TEMPERATURE
+};
+enum {
+ SCKS_ON,
+ SCKS_OFF,
+ NUM_SCKS_STATE_TYPES
+};
+
+#define VFT_TABLE_DEFINED
+
+#define TEMP_RANGE_MAXSTEPS 12
+struct VFT_CELL_t {
+ uint16_t Voltage;
+};
+
+typedef struct VFT_CELL_t VFT_CELL_t;
+#ifdef SMU__FIRMWARE_SCKS_PRESENT__1
+struct SCS_CELL_t {
+ uint16_t PsmCnt[NUM_SCKS_STATE_TYPES];
+};
+typedef struct SCS_CELL_t SCS_CELL_t;
+#endif
+
+struct VFT_TABLE_t {
+ VFT_CELL_t Cell[TEMP_RANGE_MAXSTEPS][NUM_VFT_COLUMNS];
+ uint16_t AvfsGbv [NUM_VFT_COLUMNS];
+ uint16_t BtcGbv [NUM_VFT_COLUMNS];
+ int16_t Temperature [TEMP_RANGE_MAXSTEPS];
+
+#ifdef SMU__FIRMWARE_SCKS_PRESENT__1
+ SCS_CELL_t ScksCell[TEMP_RANGE_MAXSTEPS][NUM_VFT_COLUMNS];
+#endif
+
+ uint8_t NumTemperatureSteps;
+ uint8_t padding[3];
+};
+typedef struct VFT_TABLE_t VFT_TABLE_t;
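
VFT_TABLE_t is a voltage grid indexed by temperature step (rows) and SCLK DPM column, so a lookup is plain 2-D indexing, sketched with hypothetical names:

	/* temp_step < tbl->NumTemperatureSteps, col < NUM_VFT_COLUMNS */
	uint16_t voltage = tbl->Cell[temp_step][col].Voltage;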
+
+#define BTCGB_VDROOP_TABLE_MAX_ENTRIES 2
+#define AVFSGB_VDROOP_TABLE_MAX_ENTRIES 2
+
+struct GB_VDROOP_TABLE_t {
+ int32_t a0;
+ int32_t a1;
+ int32_t a2;
+ uint32_t spare;
+};
+typedef struct GB_VDROOP_TABLE_t GB_VDROOP_TABLE_t;
+
+struct SMU_QuadraticCoeffs {
+ int32_t m1;
+ int32_t b;
+
+ int16_t m2;
+ uint8_t m1_shift;
+ uint8_t m2_shift;
+};
+typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
+
+struct AVFS_Margin_t {
+ VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_Margin_t AVFS_Margin_t;
+
+struct AVFS_CksOff_Gbv_t {
+ VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_CksOff_Gbv_t AVFS_CksOff_Gbv_t;
+
+struct AVFS_CksOff_AvfsGbv_t {
+ VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_CksOff_AvfsGbv_t AVFS_CksOff_AvfsGbv_t;
+
+struct AVFS_CksOff_BtcGbv_t {
+ VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_CksOff_BtcGbv_t AVFS_CksOff_BtcGbv_t;
+
+struct AVFS_meanNsigma_t {
+ uint32_t Aconstant[3];
+ uint16_t DC_tol_sigma;
+ uint16_t Platform_mean;
+ uint16_t Platform_sigma;
+ uint16_t PSM_Age_CompFactor;
+ uint8_t Static_Voltage_Offset[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_meanNsigma_t AVFS_meanNsigma_t;
+
+struct AVFS_Sclk_Offset_t {
+ uint16_t Sclk_Offset[8];
+};
+typedef struct AVFS_Sclk_Offset_t AVFS_Sclk_Offset_t;
+
+struct Power_Sharing_t {
+ uint32_t EnergyCounter;
+ uint32_t EngeryThreshold;
+ uint64_t AM_SCLK_CNT;
+ uint64_t AM_0_BUSY_CNT;
+};
+typedef struct Power_Sharing_t Power_Sharing_t;
+
+
+#endif
+
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h
new file mode 100644
index 0000000..b64e58a2
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h
@@ -0,0 +1,886 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU75_DISCRETE_H
+#define SMU75_DISCRETE_H
+
+#include "smu75.h"
+
+#pragma pack(push, 1)
+
+#define NUM_SCLK_RANGE 8
+
+#define VCO_3_6 1
+#define VCO_2_4 3
+
+#define POSTDIV_DIV_BY_1 0
+#define POSTDIV_DIV_BY_2 1
+#define POSTDIV_DIV_BY_4 2
+#define POSTDIV_DIV_BY_8 3
+#define POSTDIV_DIV_BY_16 4
+
+struct sclkFcwRange_t {
+ uint8_t vco_setting; /* 1: 3-6GHz, 3: 2-4GHz */
+ uint8_t postdiv; /* divide by 2^n */
+ uint16_t fcw_pcc;
+ uint16_t fcw_trans_upper;
+ uint16_t fcw_trans_lower;
+};
+typedef struct sclkFcwRange_t sclkFcwRange_t;
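
Each sclkFcwRange_t row describes one PLL operating range: a VCO band (per the comment, VCO_3_6 covers 3-6 GHz and VCO_2_4 covers 2-4 GHz), a post divider stored as log2 of the divide ratio, and frequency-control-word bounds for the range. The FCW-to-frequency mapping is firmware-defined and not reproduced here; a sketch of decoding just the post divider:

/* POSTDIV_DIV_BY_n encodes log2(n); e.g. POSTDIV_DIV_BY_8 == 3 -> ratio 8. */
static uint32_t sclk_postdiv_ratio(uint8_t postdiv)
{
        return 1u << postdiv;
}

The Range_Table in vegam_smumgr.c further down instantiates eight such rows, alternating the two VCO bands for each divider setting.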
+
+struct SMIO_Pattern {
+ uint16_t Voltage;
+ uint8_t Smio;
+ uint8_t padding;
+};
+
+typedef struct SMIO_Pattern SMIO_Pattern;
+
+struct SMIO_Table {
+ SMIO_Pattern Pattern[SMU_MAX_SMIO_LEVELS];
+};
+
+typedef struct SMIO_Table SMIO_Table;
+
+struct SMU_SclkSetting {
+ uint32_t SclkFrequency;
+ uint16_t Fcw_int;
+ uint16_t Fcw_frac;
+ uint16_t Pcc_fcw_int;
+ uint8_t PllRange;
+ uint8_t SSc_En;
+ uint16_t Sclk_slew_rate;
+ uint16_t Pcc_up_slew_rate;
+ uint16_t Pcc_down_slew_rate;
+ uint16_t Fcw1_int;
+ uint16_t Fcw1_frac;
+ uint16_t Sclk_ss_slew_rate;
+};
+typedef struct SMU_SclkSetting SMU_SclkSetting;
+
+struct SMU75_Discrete_GraphicsLevel {
+ SMU_VoltageLevel MinVoltage;
+
+ uint8_t pcieDpmLevel;
+ uint8_t DeepSleepDivId;
+ uint16_t ActivityLevel;
+
+ uint32_t CgSpllFuncCntl3;
+ uint32_t CgSpllFuncCntl4;
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+
+ uint8_t SclkDid;
+ uint8_t padding;
+ uint8_t EnabledForActivity;
+ uint8_t EnabledForThrottle;
+ uint8_t UpHyst;
+ uint8_t DownHyst;
+ uint8_t VoltageDownHyst;
+ uint8_t PowerThrottle;
+
+ SMU_SclkSetting SclkSetting;
+
+ uint8_t ScksStretchThreshVid[NUM_SCKS_STATE_TYPES];
+ uint16_t Padding;
+};
+
+typedef struct SMU75_Discrete_GraphicsLevel SMU75_Discrete_GraphicsLevel;
+
+struct SMU75_Discrete_ACPILevel {
+ uint32_t Flags;
+ SMU_VoltageLevel MinVoltage;
+ uint32_t SclkFrequency;
+ uint8_t SclkDid;
+ uint8_t DisplayWatermark;
+ uint8_t DeepSleepDivId;
+ uint8_t padding;
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+
+ SMU_SclkSetting SclkSetting;
+};
+
+typedef struct SMU75_Discrete_ACPILevel SMU75_Discrete_ACPILevel;
+
+struct SMU75_Discrete_Ulv {
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+ uint16_t VddcOffset;
+ uint8_t VddcOffsetVid;
+ uint8_t VddcPhase;
+ uint16_t BifSclkDfs;
+ uint16_t Reserved;
+};
+
+typedef struct SMU75_Discrete_Ulv SMU75_Discrete_Ulv;
+
+struct SMU75_Discrete_MemoryLevel {
+ SMU_VoltageLevel MinVoltage;
+ uint32_t MinMvdd;
+
+ uint32_t MclkFrequency;
+
+ uint8_t StutterEnable;
+ uint8_t EnabledForThrottle;
+ uint8_t EnabledForActivity;
+ uint8_t padding_0;
+
+ uint8_t UpHyst;
+ uint8_t DownHyst;
+ uint8_t VoltageDownHyst;
+ uint8_t padding_1;
+
+ uint16_t ActivityLevel;
+ uint8_t DisplayWatermark;
+ uint8_t padding_2;
+
+ uint16_t Fcw_int;
+ uint16_t Fcw_frac;
+ uint8_t Postdiv;
+ uint8_t padding_3[3];
+};
+
+typedef struct SMU75_Discrete_MemoryLevel SMU75_Discrete_MemoryLevel;
+
+struct SMU75_Discrete_LinkLevel {
+ uint8_t PcieGenSpeed;
+ uint8_t PcieLaneCount;
+ uint8_t EnabledForActivity;
+ uint8_t SPC;
+ uint32_t DownThreshold;
+ uint32_t UpThreshold;
+ uint16_t BifSclkDfs;
+ uint16_t Reserved;
+};
+
+typedef struct SMU75_Discrete_LinkLevel SMU75_Discrete_LinkLevel;
+
+
+/* MC ARB DRAM Timing registers. */
+struct SMU75_Discrete_MCArbDramTimingTableEntry {
+ uint32_t McArbDramTiming;
+ uint32_t McArbDramTiming2;
+ uint32_t McArbBurstTime;
+ uint32_t McArbRfshRate;
+ uint32_t McArbMisc3;
+};
+
+typedef struct SMU75_Discrete_MCArbDramTimingTableEntry SMU75_Discrete_MCArbDramTimingTableEntry;
+
+struct SMU75_Discrete_MCArbDramTimingTable {
+ SMU75_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
+};
+
+typedef struct SMU75_Discrete_MCArbDramTimingTable SMU75_Discrete_MCArbDramTimingTable;
+
+/* UVD VCLK/DCLK state (level) definition. */
+struct SMU75_Discrete_UvdLevel {
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ SMU_VoltageLevel MinVoltage;
+ uint8_t VclkDivider;
+ uint8_t DclkDivider;
+ uint8_t padding[2];
+};
+
+typedef struct SMU75_Discrete_UvdLevel SMU75_Discrete_UvdLevel;
+
+/* Clocks for other external blocks (VCE, ACP, SAMU). */
+struct SMU75_Discrete_ExtClkLevel {
+ uint32_t Frequency;
+ SMU_VoltageLevel MinVoltage;
+ uint8_t Divider;
+ uint8_t padding[3];
+};
+
+typedef struct SMU75_Discrete_ExtClkLevel SMU75_Discrete_ExtClkLevel;
+
+struct SMU75_Discrete_StateInfo {
+ uint32_t SclkFrequency;
+ uint32_t MclkFrequency;
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint32_t SamclkFrequency;
+ uint32_t AclkFrequency;
+ uint32_t EclkFrequency;
+ uint16_t MvddVoltage;
+ uint16_t padding16;
+ uint8_t DisplayWatermark;
+ uint8_t McArbIndex;
+ uint8_t McRegIndex;
+ uint8_t SeqIndex;
+ uint8_t SclkDid;
+ int8_t SclkIndex;
+ int8_t MclkIndex;
+ uint8_t PCIeGen;
+};
+
+typedef struct SMU75_Discrete_StateInfo SMU75_Discrete_StateInfo;
+
+struct SMU75_Discrete_DpmTable {
+ SMU75_PIDController GraphicsPIDController;
+ SMU75_PIDController MemoryPIDController;
+ SMU75_PIDController LinkPIDController;
+
+ uint32_t SystemFlags;
+
+ uint32_t VRConfig;
+ uint32_t SmioMask1;
+ uint32_t SmioMask2;
+ SMIO_Table SmioTable1;
+ SMIO_Table SmioTable2;
+
+ uint32_t MvddLevelCount;
+
+ uint8_t BapmVddcVidHiSidd [SMU75_MAX_LEVELS_VDDC];
+ uint8_t BapmVddcVidLoSidd [SMU75_MAX_LEVELS_VDDC];
+ uint8_t BapmVddcVidHiSidd2 [SMU75_MAX_LEVELS_VDDC];
+
+ uint8_t GraphicsDpmLevelCount;
+ uint8_t MemoryDpmLevelCount;
+ uint8_t LinkLevelCount;
+ uint8_t MasterDeepSleepControl;
+
+ uint8_t UvdLevelCount;
+ uint8_t VceLevelCount;
+ uint8_t AcpLevelCount;
+ uint8_t SamuLevelCount;
+
+ uint8_t ThermOutGpio;
+ uint8_t ThermOutPolarity;
+ uint8_t ThermOutMode;
+ uint8_t BootPhases;
+
+ uint8_t VRHotLevel;
+ uint8_t LdoRefSel;
+
+ uint8_t Reserved1[2];
+
+ uint16_t FanStartTemperature;
+ uint16_t FanStopTemperature;
+
+ uint16_t MaxVoltage;
+ uint16_t Reserved2;
+ uint32_t Reserved;
+
+ SMU75_Discrete_GraphicsLevel GraphicsLevel [SMU75_MAX_LEVELS_GRAPHICS];
+ SMU75_Discrete_MemoryLevel MemoryACPILevel;
+ SMU75_Discrete_MemoryLevel MemoryLevel [SMU75_MAX_LEVELS_MEMORY];
+ SMU75_Discrete_LinkLevel LinkLevel [SMU75_MAX_LEVELS_LINK];
+ SMU75_Discrete_ACPILevel ACPILevel;
+ SMU75_Discrete_UvdLevel UvdLevel [SMU75_MAX_LEVELS_UVD];
+ SMU75_Discrete_ExtClkLevel VceLevel [SMU75_MAX_LEVELS_VCE];
+ SMU75_Discrete_ExtClkLevel AcpLevel [SMU75_MAX_LEVELS_ACP];
+ SMU75_Discrete_ExtClkLevel SamuLevel [SMU75_MAX_LEVELS_SAMU];
+ SMU75_Discrete_Ulv Ulv;
+
+ uint8_t DisplayWatermark [SMU75_MAX_LEVELS_MEMORY][SMU75_MAX_LEVELS_GRAPHICS];
+
+ uint32_t SclkStepSize;
+ uint32_t Smio [SMU75_MAX_ENTRIES_SMIO];
+
+ uint8_t UvdBootLevel;
+ uint8_t VceBootLevel;
+ uint8_t AcpBootLevel;
+ uint8_t SamuBootLevel;
+
+ uint8_t GraphicsBootLevel;
+ uint8_t GraphicsVoltageChangeEnable;
+ uint8_t GraphicsThermThrottleEnable;
+ uint8_t GraphicsInterval;
+
+ uint8_t VoltageInterval;
+ uint8_t ThermalInterval;
+ uint16_t TemperatureLimitHigh;
+
+ uint16_t TemperatureLimitLow;
+ uint8_t MemoryBootLevel;
+ uint8_t MemoryVoltageChangeEnable;
+
+ uint16_t BootMVdd;
+ uint8_t MemoryInterval;
+ uint8_t MemoryThermThrottleEnable;
+
+ uint16_t VoltageResponseTime;
+ uint16_t PhaseResponseTime;
+
+ uint8_t PCIeBootLinkLevel;
+ uint8_t PCIeGenInterval;
+ uint8_t DTEInterval;
+ uint8_t DTEMode;
+
+ uint8_t SVI2Enable;
+ uint8_t VRHotGpio;
+ uint8_t AcDcGpio;
+ uint8_t ThermGpio;
+
+ uint16_t PPM_PkgPwrLimit;
+ uint16_t PPM_TemperatureLimit;
+
+ uint16_t DefaultTdp;
+ uint16_t TargetTdp;
+
+ uint16_t FpsHighThreshold;
+ uint16_t FpsLowThreshold;
+
+ uint16_t BAPMTI_R [SMU75_DTE_ITERATIONS][SMU75_DTE_SOURCES][SMU75_DTE_SINKS];
+ uint16_t BAPMTI_RC [SMU75_DTE_ITERATIONS][SMU75_DTE_SOURCES][SMU75_DTE_SINKS];
+
+ uint16_t TemperatureLimitEdge;
+ uint16_t TemperatureLimitHotspot;
+
+ uint16_t BootVddc;
+ uint16_t BootVddci;
+
+ uint16_t FanGainEdge;
+ uint16_t FanGainHotspot;
+
+ uint32_t LowSclkInterruptThreshold;
+ uint32_t VddGfxReChkWait;
+
+ uint8_t ClockStretcherAmount;
+ uint8_t Sclk_CKS_masterEn0_7;
+ uint8_t Sclk_CKS_masterEn8_15;
+ uint8_t DPMFreezeAndForced;
+
+ uint8_t Sclk_voltageOffset[8];
+
+ SMU_ClockStretcherDataTable ClockStretcherDataTable;
+ SMU_CKS_LOOKUPTable CKS_LOOKUPTable;
+
+ uint32_t CurrSclkPllRange;
+ sclkFcwRange_t SclkFcwRangeTable[NUM_SCLK_RANGE];
+
+ GB_VDROOP_TABLE_t BTCGB_VDROOP_TABLE[BTCGB_VDROOP_TABLE_MAX_ENTRIES];
+ SMU_QuadraticCoeffs AVFSGB_FUSE_TABLE[AVFSGB_VDROOP_TABLE_MAX_ENTRIES];
+};
+
+typedef struct SMU75_Discrete_DpmTable SMU75_Discrete_DpmTable;
+
+struct SMU75_Discrete_FanTable {
+ uint16_t FdoMode;
+ int16_t TempMin;
+ int16_t TempMed;
+ int16_t TempMax;
+ int16_t Slope1;
+ int16_t Slope2;
+ int16_t FdoMin;
+ int16_t HystUp;
+ int16_t HystDown;
+ int16_t HystSlope;
+ int16_t TempRespLim;
+ int16_t TempCurr;
+ int16_t SlopeCurr;
+ int16_t PwmCurr;
+ uint32_t RefreshPeriod;
+ int16_t FdoMax;
+ uint8_t TempSrc;
+ int8_t Padding;
+};
+
+typedef struct SMU75_Discrete_FanTable SMU75_Discrete_FanTable;
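
The fan table reads as a two-segment curve: PWM ramps at Slope1 between TempMin and TempMed and at Slope2 between TempMed and TempMax, clamped between FdoMin and FdoMax, with the Hyst* fields damping oscillation. The firmware's fixed-point scaling of the slopes is not spelled out in this header, so the units in this sketch are assumed:

/* Illustrative two-slope fan curve under the reading above. */
static int16_t fan_pwm_for_temp(const SMU75_Discrete_FanTable *ft, int16_t t)
{
        int32_t pwm;

        if (t <= ft->TempMin)
                return ft->FdoMin;
        pwm = ft->FdoMin + (int32_t)ft->Slope1 *
                (((t < ft->TempMed) ? t : ft->TempMed) - ft->TempMin);
        if (t > ft->TempMed)
                pwm += (int32_t)ft->Slope2 * (t - ft->TempMed);
        return (pwm > ft->FdoMax) ? ft->FdoMax : (int16_t)pwm;
}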
+
+#define SMU7_DISCRETE_GPIO_SCLK_DEBUG 4
+#define SMU7_DISCRETE_GPIO_SCLK_DEBUG_BIT (0x1 << SMU7_DISCRETE_GPIO_SCLK_DEBUG)
+
+struct SMU7_MclkDpmScoreboard {
+ uint32_t PercentageBusy;
+
+ int32_t PIDError;
+ int32_t PIDIntegral;
+ int32_t PIDOutput;
+
+ uint32_t SigmaDeltaAccum;
+ uint32_t SigmaDeltaOutput;
+ uint32_t SigmaDeltaLevel;
+
+ uint32_t UtilizationSetpoint;
+
+ uint8_t TdpClampMode;
+ uint8_t TdcClampMode;
+ uint8_t ThermClampMode;
+ uint8_t VoltageBusy;
+
+ int8_t CurrLevel;
+ int8_t TargLevel;
+ uint8_t LevelChangeInProgress;
+ uint8_t UpHyst;
+
+ uint8_t DownHyst;
+ uint8_t VoltageDownHyst;
+ uint8_t DpmEnable;
+ uint8_t DpmRunning;
+
+ uint8_t DpmForce;
+ uint8_t DpmForceLevel;
+ uint8_t padding2;
+ uint8_t McArbIndex;
+
+ uint32_t MinimumPerfMclk;
+
+ uint8_t AcpiReq;
+ uint8_t AcpiAck;
+ uint8_t MclkSwitchInProgress;
+ uint8_t MclkSwitchCritical;
+
+ uint8_t IgnoreVBlank;
+ uint8_t TargetMclkIndex;
+ uint8_t TargetMvddIndex;
+ uint8_t MclkSwitchResult;
+
+ uint16_t VbiFailureCount;
+ uint8_t VbiWaitCounter;
+ uint8_t EnabledLevelsChange;
+
+ uint16_t LevelResidencyCounters [SMU75_MAX_LEVELS_MEMORY];
+ uint16_t LevelSwitchCounters [SMU75_MAX_LEVELS_MEMORY];
+
+ void (*TargetStateCalculator)(uint8_t);
+ void (*SavedTargetStateCalculator)(uint8_t);
+
+ uint16_t AutoDpmInterval;
+ uint16_t AutoDpmRange;
+
+ uint16_t VbiTimeoutCount;
+ uint16_t MclkSwitchingTime;
+
+ uint8_t fastSwitch;
+ uint8_t Save_PIC_VDDGFX_EXIT;
+ uint8_t Save_PIC_VDDGFX_ENTER;
+ uint8_t VbiTimeout;
+
+ uint32_t HbmTempRegBackup;
+};
+
+typedef struct SMU7_MclkDpmScoreboard SMU7_MclkDpmScoreboard;
+
+struct SMU7_UlvScoreboard {
+ uint8_t EnterUlv;
+ uint8_t ExitUlv;
+ uint8_t UlvActive;
+ uint8_t WaitingForUlv;
+ uint8_t UlvEnable;
+ uint8_t UlvRunning;
+ uint8_t UlvMasterEnable;
+ uint8_t padding;
+ uint32_t UlvAbortedCount;
+ uint32_t UlvTimeStamp;
+};
+
+typedef struct SMU7_UlvScoreboard SMU7_UlvScoreboard;
+
+struct VddgfxSavedRegisters {
+ uint32_t GPU_DBG[3];
+ uint32_t MEC_BaseAddress_Hi;
+ uint32_t MEC_BaseAddress_Lo;
+ uint32_t THM_TMON0_CTRL2__RDIR_PRESENT;
+ uint32_t THM_TMON1_CTRL2__RDIR_PRESENT;
+ uint32_t CP_INT_CNTL;
+};
+
+typedef struct VddgfxSavedRegisters VddgfxSavedRegisters;
+
+struct SMU7_VddGfxScoreboard {
+ uint8_t VddGfxEnable;
+ uint8_t VddGfxActive;
+ uint8_t VPUResetOccured;
+ uint8_t padding;
+
+ uint32_t VddGfxEnteredCount;
+ uint32_t VddGfxAbortedCount;
+
+ uint32_t VddGfxVid;
+
+ VddgfxSavedRegisters SavedRegisters;
+};
+
+typedef struct SMU7_VddGfxScoreboard SMU7_VddGfxScoreboard;
+
+struct SMU7_TdcLimitScoreboard {
+ uint8_t Enable;
+ uint8_t Running;
+ uint16_t Alpha;
+ uint32_t FilteredIddc;
+ uint32_t IddcLimit;
+ uint32_t IddcHyst;
+ SMU7_HystController_Data HystControllerData;
+};
+
+typedef struct SMU7_TdcLimitScoreboard SMU7_TdcLimitScoreboard;
+
+struct SMU7_PkgPwrLimitScoreboard {
+ uint8_t Enable;
+ uint8_t Running;
+ uint16_t Alpha;
+ uint32_t FilteredPkgPwr;
+ uint32_t Limit;
+ uint32_t Hyst;
+ uint32_t LimitFromDriver;
+ uint8_t PowerSharingEnabled;
+ uint8_t PowerSharingCounter;
+ uint8_t PowerSharingINTEnabled;
+ uint8_t GFXActivityCounterEnabled;
+ uint32_t EnergyCount;
+ uint32_t PSACTCount;
+ uint8_t RollOverRequired;
+ uint8_t RollOverCount;
+ uint8_t padding[2];
+ SMU7_HystController_Data HystControllerData;
+};
+
+typedef struct SMU7_PkgPwrLimitScoreboard SMU7_PkgPwrLimitScoreboard;
+
+struct SMU7_BapmScoreboard {
+ uint32_t source_powers[SMU75_DTE_SOURCES];
+ uint32_t source_powers_last[SMU75_DTE_SOURCES];
+ int32_t entity_temperatures[SMU75_NUM_GPU_TES];
+ int32_t initial_entity_temperatures[SMU75_NUM_GPU_TES];
+ int32_t Limit;
+ int32_t Hyst;
+ int32_t therm_influence_coeff_table[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS * 2];
+ int32_t therm_node_table[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS];
+ uint16_t ConfigTDPPowerScalar;
+ uint16_t FanSpeedPowerScalar;
+ uint16_t OverDrivePowerScalar;
+ uint16_t OverDriveLimitScalar;
+ uint16_t FinalPowerScalar;
+ uint8_t VariantID;
+ uint8_t spare997;
+
+ SMU7_HystController_Data HystControllerData;
+
+ int32_t temperature_gradient_slope;
+ int32_t temperature_gradient;
+ uint32_t measured_temperature;
+};
+
+
+typedef struct SMU7_BapmScoreboard SMU7_BapmScoreboard;
+
+struct SMU7_AcpiScoreboard {
+ uint32_t SavedInterruptMask[2];
+ uint8_t LastACPIRequest;
+ uint8_t CgBifResp;
+ uint8_t RequestType;
+ uint8_t Padding;
+ SMU75_Discrete_ACPILevel D0Level;
+};
+
+typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard;
+
+struct SMU75_Discrete_PmFuses {
+ uint8_t BapmVddCVidHiSidd[8];
+
+ uint8_t BapmVddCVidLoSidd[8];
+
+ uint8_t VddCVid[8];
+
+ uint8_t SviLoadLineEn;
+ uint8_t SviLoadLineVddC;
+ uint8_t SviLoadLineTrimVddC;
+ uint8_t SviLoadLineOffsetVddC;
+
+ uint16_t TDC_VDDC_PkgLimit;
+ uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+ uint8_t TDC_MAWt;
+
+ uint8_t TdcWaterfallCtl;
+ uint8_t LPMLTemperatureMin;
+ uint8_t LPMLTemperatureMax;
+ uint8_t Reserved;
+
+ uint8_t LPMLTemperatureScaler[16];
+
+ int16_t FuzzyFan_ErrorSetDelta;
+ int16_t FuzzyFan_ErrorRateSetDelta;
+ int16_t FuzzyFan_PwmSetDelta;
+ uint16_t Reserved6;
+
+ uint8_t GnbLPML[16];
+
+ uint8_t GnbLPMLMaxVid;
+ uint8_t GnbLPMLMinVid;
+ uint8_t Reserved1[2];
+
+ uint16_t BapmVddCBaseLeakageHiSidd;
+ uint16_t BapmVddCBaseLeakageLoSidd;
+
+ uint16_t VFT_Temp[3];
+ uint8_t Version;
+ uint8_t padding;
+
+ SMU_QuadraticCoeffs VFT_ATE[3];
+
+ SMU_QuadraticCoeffs AVFS_GB;
+ SMU_QuadraticCoeffs ATE_ACBTC_GB;
+
+ SMU_QuadraticCoeffs P2V;
+
+ uint32_t PsmCharzFreq;
+
+ uint16_t InversionVoltage;
+ uint16_t PsmCharzTemp;
+
+ uint32_t EnabledAvfsModules;
+
+ SMU_QuadraticCoeffs BtcGbv_CksOff;
+};
+
+typedef struct SMU75_Discrete_PmFuses SMU75_Discrete_PmFuses;
+
+struct SMU7_Discrete_Log_Header_Table {
+ uint32_t version;
+ uint32_t asic_id;
+ uint16_t flags;
+ uint16_t entry_size;
+ uint32_t total_size;
+ uint32_t num_of_entries;
+ uint8_t type;
+ uint8_t mode;
+ uint8_t filler_0[2];
+ uint32_t filler_1[2];
+};
+
+typedef struct SMU7_Discrete_Log_Header_Table SMU7_Discrete_Log_Header_Table;
+
+struct SMU7_Discrete_Log_Cntl {
+ uint8_t Enabled;
+ uint8_t Type;
+ uint8_t padding[2];
+ uint32_t BufferSize;
+ uint32_t SamplesLogged;
+ uint32_t SampleSize;
+ uint32_t AddrL;
+ uint32_t AddrH;
+};
+
+typedef struct SMU7_Discrete_Log_Cntl SMU7_Discrete_Log_Cntl;
+
+#if defined SMU__DGPU_ONLY
+#define CAC_ACC_NW_NUM_OF_SIGNALS 87
+#endif
+
+
+struct SMU7_Discrete_Cac_Collection_Table {
+ uint32_t temperature;
+ uint32_t cac_acc_nw[CAC_ACC_NW_NUM_OF_SIGNALS];
+};
+
+typedef struct SMU7_Discrete_Cac_Collection_Table SMU7_Discrete_Cac_Collection_Table;
+
+struct SMU7_Discrete_Cac_Verification_Table {
+ uint32_t VddcTotalPower;
+ uint32_t VddcLeakagePower;
+ uint32_t VddcConstantPower;
+ uint32_t VddcGfxDynamicPower;
+ uint32_t VddcUvdDynamicPower;
+ uint32_t VddcVceDynamicPower;
+ uint32_t VddcAcpDynamicPower;
+ uint32_t VddcPcieDynamicPower;
+ uint32_t VddcDceDynamicPower;
+ uint32_t VddcCurrent;
+ uint32_t VddcVoltage;
+ uint32_t VddciTotalPower;
+ uint32_t VddciLeakagePower;
+ uint32_t VddciConstantPower;
+ uint32_t VddciDynamicPower;
+ uint32_t Vddr1TotalPower;
+ uint32_t Vddr1LeakagePower;
+ uint32_t Vddr1ConstantPower;
+ uint32_t Vddr1DynamicPower;
+ uint32_t spare[4];
+ uint32_t temperature;
+};
+
+typedef struct SMU7_Discrete_Cac_Verification_Table SMU7_Discrete_Cac_Verification_Table;
+
+struct SMU7_Discrete_Pm_Status_Table {
+ int32_t T_meas_max[SMU75_THERMAL_INPUT_LOOP_COUNT];
+ int32_t T_meas_acc[SMU75_THERMAL_INPUT_LOOP_COUNT];
+
+ uint32_t I_calc_max;
+ uint32_t I_calc_acc;
+ uint32_t P_meas_acc;
+ uint32_t V_meas_load_acc;
+ uint32_t I_meas_acc;
+ uint32_t P_meas_acc_vddci;
+ uint32_t V_meas_load_acc_vddci;
+ uint32_t I_meas_acc_vddci;
+
+ uint16_t Sclk_dpm_residency[8];
+ uint16_t Uvd_dpm_residency[8];
+ uint16_t Vce_dpm_residency[8];
+ uint16_t Mclk_dpm_residency[4];
+
+ uint32_t P_roc_acc;
+ uint32_t PkgPwr_max;
+ uint32_t PkgPwr_acc;
+ uint32_t MclkSwitchingTime_max;
+ uint32_t MclkSwitchingTime_acc;
+ uint32_t FanPwm_acc;
+ uint32_t FanRpm_acc;
+ uint32_t Gfx_busy_acc;
+ uint32_t Mc_busy_acc;
+ uint32_t Fps_acc;
+
+ uint32_t AccCnt;
+};
+
+typedef struct SMU7_Discrete_Pm_Status_Table SMU7_Discrete_Pm_Status_Table;
+
+struct SMU7_Discrete_AutoWattMan_Status_Table {
+ int32_t T_meas_acc[SMU75_THERMAL_INPUT_LOOP_COUNT];
+ uint16_t Sclk_dpm_residency[8];
+ uint16_t Mclk_dpm_residency[4];
+ uint32_t TgpPwr_acc;
+ uint32_t Gfx_busy_acc;
+ uint32_t Mc_busy_acc;
+ uint32_t AccCnt;
+};
+
+typedef struct SMU7_Discrete_AutoWattMan_Status_Table SMU7_Discrete_AutoWattMan_Status_Table;
+
+#define SMU7_MAX_GFX_CU_COUNT 24
+#define SMU7_MIN_GFX_CU_COUNT 8
+#define SMU7_GFX_CU_PG_ENABLE_DC_MAX_CU_SHIFT 0
+#define SMU7_GFX_CU_PG_ENABLE_DC_MAX_CU_MASK (0xFFFF << SMU7_GFX_CU_PG_ENABLE_DC_MAX_CU_SHIFT)
+#define SMU7_GFX_CU_PG_ENABLE_AC_MAX_CU_SHIFT 16
+#define SMU7_GFX_CU_PG_ENABLE_AC_MAX_CU_MASK (0xFFFF << SMU7_GFX_CU_PG_ENABLE_AC_MAX_CU_SHIFT)
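
These two mask/shift pairs split a single 32-bit argument into a DC CU limit in the low half-word and an AC CU limit in the high half-word. A sketch of packing it; the message that consumes this word is not part of this hunk, so the helper name is hypothetical:

static uint32_t smu7_pack_gfx_cu_pg_enable(uint8_t dc_max_cu, uint8_t ac_max_cu)
{
        return (((uint32_t)dc_max_cu << SMU7_GFX_CU_PG_ENABLE_DC_MAX_CU_SHIFT) &
                SMU7_GFX_CU_PG_ENABLE_DC_MAX_CU_MASK) |
               (((uint32_t)ac_max_cu << SMU7_GFX_CU_PG_ENABLE_AC_MAX_CU_SHIFT) &
                SMU7_GFX_CU_PG_ENABLE_AC_MAX_CU_MASK);
}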
+
+struct SMU7_GfxCuPgScoreboard {
+ uint8_t Enabled;
+ uint8_t WaterfallUp;
+ uint8_t WaterfallDown;
+ uint8_t WaterfallLimit;
+ uint8_t CurrMaxCu;
+ uint8_t TargMaxCu;
+ uint8_t ClampMode;
+ uint8_t Active;
+ uint8_t MaxSupportedCu;
+ uint8_t MinSupportedCu;
+ uint8_t PendingGfxCuHostInterrupt;
+ uint8_t LastFilteredMaxCuInteger;
+ uint16_t FilteredMaxCu;
+ uint16_t FilteredMaxCuAlpha;
+ uint16_t FilterResetCount;
+ uint16_t FilterResetCountLimit;
+ uint8_t ForceCu;
+ uint8_t ForceCuCount;
+ uint8_t AcModeMaxCu;
+ uint8_t DcModeMaxCu;
+};
+
+typedef struct SMU7_GfxCuPgScoreboard SMU7_GfxCuPgScoreboard;
+
+#define SMU7_SCLK_CAC 0x561
+#define SMU7_MCLK_CAC 0xF9
+#define SMU7_VCLK_CAC 0x2DE
+#define SMU7_DCLK_CAC 0x2DE
+#define SMU7_ECLK_CAC 0x25E
+#define SMU7_ACLK_CAC 0x25E
+#define SMU7_SAMCLK_CAC 0x25E
+#define SMU7_DISPCLK_CAC 0x100
+#define SMU7_CAC_CONSTANT 0x2EE3430
+#define SMU7_CAC_CONSTANT_SHIFT 18
+
+#define SMU7_VDDCI_MCLK_CONST 1765
+#define SMU7_VDDCI_MCLK_CONST_SHIFT 16
+#define SMU7_VDDCI_VDDCI_CONST 50958
+#define SMU7_VDDCI_VDDCI_CONST_SHIFT 14
+#define SMU7_VDDCI_CONST 11781
+#define SMU7_VDDCI_STROBE_PWR 1331
+
+#define SMU7_VDDR1_CONST 693
+#define SMU7_VDDR1_CAC_WEIGHT 20
+#define SMU7_VDDR1_CAC_WEIGHT_SHIFT 19
+#define SMU7_VDDR1_STROBE_PWR 512
+
+#define SMU7_AREA_COEFF_UVD 0xA78
+#define SMU7_AREA_COEFF_VCE 0x190A
+#define SMU7_AREA_COEFF_ACP 0x22D1
+#define SMU7_AREA_COEFF_SAMU 0x534
+
+#define SMU7_THERM_OUT_MODE_DISABLE 0x0
+#define SMU7_THERM_OUT_MODE_THERM_ONLY 0x1
+#define SMU7_THERM_OUT_MODE_THERM_VRHOT 0x2
+
+#define SQ_Enable_MASK 0x1
+#define SQ_IR_MASK 0x2
+#define SQ_PCC_MASK 0x4
+#define SQ_EDC_MASK 0x8
+
+#define TCP_Enable_MASK 0x100
+#define TCP_IR_MASK 0x200
+#define TCP_PCC_MASK 0x400
+#define TCP_EDC_MASK 0x800
+
+#define TD_Enable_MASK 0x10000
+#define TD_IR_MASK 0x20000
+#define TD_PCC_MASK 0x40000
+#define TD_EDC_MASK 0x80000
+
+#define DB_Enable_MASK 0x1000000
+#define DB_IR_MASK 0x2000000
+#define DB_PCC_MASK 0x4000000
+#define DB_EDC_MASK 0x8000000
+
+#define SQ_Enable_SHIFT 0
+#define SQ_IR_SHIFT 1
+#define SQ_PCC_SHIFT 2
+#define SQ_EDC_SHIFT 3
+
+#define TCP_Enable_SHIFT 8
+#define TCP_IR_SHIFT 9
+#define TCP_PCC_SHIFT 10
+#define TCP_EDC_SHIFT 11
+
+#define TD_Enable_SHIFT 16
+#define TD_IR_SHIFT 17
+#define TD_PCC_SHIFT 18
+#define TD_EDC_SHIFT 19
+
+#define DB_Enable_SHIFT 24
+#define DB_IR_SHIFT 25
+#define DB_PCC_SHIFT 26
+#define DB_EDC_SHIFT 27
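
Each DIDT block (SQ, TCP, TD, DB) owns one byte of a packed control word, with Enable/IR/PCC/EDC bits at fixed offsets, so every *_MASK equals 1 shifted by the matching *_SHIFT. For example, a word enabling all SQ features but only EDC on TD (illustrative packing only; which bits the firmware expects set together is policy decided elsewhere in the driver):

uint32_t didt_cfg = SQ_Enable_MASK | SQ_IR_MASK | SQ_PCC_MASK | SQ_EDC_MASK |
                    TD_EDC_MASK;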
+
+#define PMFUSES_AVFSSIZE 104
+
+#define BTCGB0_Vdroop_Enable_MASK 0x1
+#define BTCGB1_Vdroop_Enable_MASK 0x2
+#define AVFSGB0_Vdroop_Enable_MASK 0x4
+#define AVFSGB1_Vdroop_Enable_MASK 0x8
+
+#define BTCGB0_Vdroop_Enable_SHIFT 0
+#define BTCGB1_Vdroop_Enable_SHIFT 1
+#define AVFSGB0_Vdroop_Enable_SHIFT 2
+#define AVFSGB1_Vdroop_Enable_SHIFT 3
+
+#pragma pack(pop)
+
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
index c3ed737..715b5a1 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
@@ -131,6 +131,7 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_RunAcgInOpenLoop 0x5E
#define PPSMC_MSG_InitializeAcg 0x5F
#define PPSMC_MSG_GetCurrPkgPwr 0x61
+#define PPSMC_MSG_GetAverageGfxclkActualFrequency 0x63
#define PPSMC_MSG_SetPccThrottleLevel 0x67
#define PPSMC_MSG_UpdatePkgPwrPidAlpha 0x68
#define PPSMC_Message_Count 0x69
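
The new 0x63 message follows the existing request/response convention: post the ID, then read the result back from the argument register. A hedged sketch of a caller; this hunk only reserves the ID, so the call site below is hypothetical and the readback helper (which reads MP1 C2PMSG_82, presumably returning the frequency in MHz) stands in for whatever wrapper the hwmgr actually uses:

uint32_t gfxclk_mhz;

smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency);
gfxclk_mhz = vega10_get_argument(hwmgr);        /* placeholder readback */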
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 9587550..0a20040 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -26,7 +26,7 @@
SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \
polaris10_smumgr.o iceland_smumgr.o \
smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o \
- vega12_smumgr.o
+ vega12_smumgr.o vegam_smumgr.o
AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 08d0001..2d4ec8a 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -61,9 +61,6 @@
#define SMC_RAM_END 0x40000
-#define VOLTAGE_SCALE 4
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
#define CISLAND_MINIMUM_ENGINE_CLOCK 800
#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
@@ -211,9 +208,7 @@ static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
int ret;
- if (!ci_is_smc_ram_running(hwmgr))
- return -EINVAL;
-
+ cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
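
This rework drops the is-RAM-running guard and instead arms the response register before posting: clear SMC_RESP_0, write the message to SMC_MESSAGE_0, then wait for a nonzero response. Condensed into one function for clarity, using the same macros as the hunk; a sketch, not the driver's actual helper:

static int smc_mailbox_send(struct pp_hwmgr *hwmgr, uint16_t msg)
{
        cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);      /* arm */
        cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg); /* post */
        PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);  /* wait */
        /* firmware posts 1 on success, 0xFE when the message is unsupported */
        return (PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP) == 1) ?
                0 : -EINVAL;
}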
@@ -1182,7 +1177,6 @@ static int ci_populate_single_memory_level(
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
int result = 0;
bool dll_state_on;
- struct cgs_display_info info = {0};
uint32_t mclk_edc_wr_enable_threshold = 40000;
uint32_t mclk_edc_enable_threshold = 40000;
uint32_t mclk_strobe_mode_threshold = 40000;
@@ -1236,8 +1230,7 @@ static int ci_populate_single_memory_level(
/* default set to low watermark. Highest level will be set to high later.*/
memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
- cgs_get_active_displays_info(hwmgr->device, &info);
- data->display_timing.num_existing_displays = info.display_count;
+ data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
/* stutter mode not supported on ci */
@@ -2784,7 +2777,6 @@ static int ci_smu_fini(struct pp_hwmgr *hwmgr)
{
kfree(hwmgr->smu_backend);
hwmgr->smu_backend = NULL;
- cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index faef783..53df940 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -53,10 +53,7 @@
#define FIJI_SMC_SIZE 0x20000
-#define VOLTAGE_SCALE 4
#define POWERTUNE_DEFAULT_SET_MAX 1
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
#define VDDC_VDDCI_DELTA 300
#define MC_CG_ARB_FREQ_F1 0x0b
@@ -288,8 +285,7 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr)
struct fiji_smumgr *priv = (struct fiji_smumgr *)(hwmgr->smu_backend);
/* Only start SMC if SMC RAM is not running */
- if (!(smu7_is_smc_ram_running(hwmgr)
- || cgs_is_virtualization_enabled(hwmgr->device))) {
+ if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) {
/* Check if SMU is running in protected mode */
if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC,
@@ -307,13 +303,13 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr)
}
/* To initialize all clock gating before RLC loaded and running.*/
- cgs_set_clockgating_state(hwmgr->device,
+ amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE);
- cgs_set_clockgating_state(hwmgr->device,
+ amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_GMC, AMD_CG_STATE_GATE);
- cgs_set_clockgating_state(hwmgr->device,
+ amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_SDMA, AMD_CG_STATE_GATE);
- cgs_set_clockgating_state(hwmgr->device,
+ amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_COMMON, AMD_CG_STATE_GATE);
/* Setup SoftRegsStart here for register lookup in case
@@ -335,10 +331,10 @@ static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
uint32_t efuse = 0;
uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1;
- if (cgs_is_virtualization_enabled(hwmgr->device))
- return 0;
+ if (!hwmgr->not_vf)
+ return false;
- if (!atomctrl_read_efuse(hwmgr->device, AVFS_EN_LSB, AVFS_EN_MSB,
+ if (!atomctrl_read_efuse(hwmgr, AVFS_EN_LSB, AVFS_EN_MSB,
mask, &efuse)) {
if (efuse)
return true;
@@ -989,11 +985,11 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
threshold = clock * data->fast_watermark_threshold / 100;
- data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+ data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
- hwmgr->display_config.min_core_set_clock_in_sr);
+ hwmgr->display_config->min_core_set_clock_in_sr);
/* Default to slow, highest DPM level will be
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index d4bb934..415f691 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -60,10 +60,7 @@
#define ICELAND_SMC_SIZE 0x20000
-#define VOLTAGE_SCALE 4
#define POWERTUNE_DEFAULT_SET_MAX 1
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
#define MC_CG_ARB_FREQ_F1 0x0b
#define VDDC_VDDCI_DELTA 200
@@ -932,7 +929,7 @@ static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
graphic_level->PowerThrottle = 0;
data->display_timing.min_clock_in_sr =
- hwmgr->display_config.min_core_set_clock_in_sr;
+ hwmgr->display_config->min_core_set_clock_in_sr;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep))
@@ -1236,7 +1233,6 @@ static int iceland_populate_single_memory_level(
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
int result = 0;
bool dll_state_on;
- struct cgs_display_info info = {0};
uint32_t mclk_edc_wr_enable_threshold = 40000;
uint32_t mclk_edc_enable_threshold = 40000;
uint32_t mclk_strobe_mode_threshold = 40000;
@@ -1283,8 +1279,7 @@ static int iceland_populate_single_memory_level(
/* default set to low watermark. Highest level will be set to high later.*/
memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
- cgs_get_active_displays_info(hwmgr->device, &info);
- data->display_timing.num_existing_displays = info.display_count;
+ data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
/* stutter mode not supported on iceland */
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 997a777..a8c6524 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -52,8 +52,6 @@
#include "dce/dce_10_0_sh_mask.h"
#define POLARIS10_SMC_SIZE 0x20000
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
#define POWERTUNE_DEFAULT_SET_MAX 1
#define VDDC_VDDCI_DELTA 200
#define MC_CG_ARB_FREQ_F1 0x0b
@@ -295,25 +293,16 @@ static int polaris10_start_smu(struct pp_hwmgr *hwmgr)
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
/* Only start SMC if SMC RAM is not running */
- if (!(smu7_is_smc_ram_running(hwmgr)
- || cgs_is_virtualization_enabled(hwmgr->device))) {
+ if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) {
smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
/* Check if SMU is running in protected mode */
- if (smu_data->protected_mode == 0) {
+ if (smu_data->protected_mode == 0)
result = polaris10_start_smu_in_non_protection_mode(hwmgr);
- } else {
+ else
result = polaris10_start_smu_in_protection_mode(hwmgr);
- /* If failed, try with different security Key. */
- if (result != 0) {
- smu_data->smu7_data.security_hard_key ^= 1;
- cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
- result = polaris10_start_smu_in_protection_mode(hwmgr);
- }
- }
-
if (result != 0)
PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result);
@@ -951,11 +940,11 @@ static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
level->DownHyst = data->current_profile_setting.sclk_down_hyst;
level->VoltageDownHyst = 0;
level->PowerThrottle = 0;
- data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+ data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
- hwmgr->display_config.min_core_set_clock_in_sr);
+ hwmgr->display_config->min_core_set_clock_in_sr);
/* Default to slow, highest DPM level will be
* set to PPSMC_DISPLAY_WATERMARK_LOW later.
@@ -1085,11 +1074,9 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
int result = 0;
- struct cgs_display_info info = {0, 0, NULL};
uint32_t mclk_stutter_mode_threshold = 40000;
phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
- cgs_get_active_displays_info(hwmgr->device, &info);
if (hwmgr->od_enabled)
vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk;
@@ -1115,7 +1102,7 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
mem_level->StutterEnable = false;
mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
- data->display_timing.num_existing_displays = info.display_count;
+ data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
if (mclk_stutter_mode_threshold &&
(clock <= mclk_stutter_mode_threshold) &&
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index bc53f2b..0a563f6 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -23,7 +23,7 @@
#include "smumgr.h"
#include "smu10_inc.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
#include "smu10_smumgr.h"
#include "ppatomctrl.h"
#include "rv_ppsmc.h"
@@ -33,8 +33,6 @@
#include "pp_debug.h"
-#define VOLTAGE_SCALE 4
-
#define BUFFER_SIZE 80000
#define MAX_STRING_SIZE 15
#define BUFFER_SIZETWO 131072
@@ -49,48 +47,41 @@
static uint32_t smu10_wait_for_response(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
uint32_t reg;
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+ reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
phm_wait_for_register_unequal(hwmgr, reg,
0, MP1_C2PMSG_90__CONTENT_MASK);
- return cgs_read_register(hwmgr->device, reg);
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
}
static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
uint16_t msg)
{
- uint32_t reg;
+ struct amdgpu_device *adev = hwmgr->adev;
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
- cgs_write_register(hwmgr->device, reg, msg);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
return 0;
}
static int smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
{
- uint32_t reg;
-
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
+ struct amdgpu_device *adev = hwmgr->adev;
- return cgs_read_register(hwmgr->device, reg);
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}
static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
- uint32_t reg;
+ struct amdgpu_device *adev = hwmgr->adev;
smu10_wait_for_response(hwmgr);
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(hwmgr->device, reg, 0);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
smu10_send_msg_to_smc_without_waiting(hwmgr, msg);
@@ -104,17 +95,13 @@ static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
static int smu10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
- uint32_t reg;
+ struct amdgpu_device *adev = hwmgr->adev;
smu10_wait_for_response(hwmgr);
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(hwmgr->device, reg, 0);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- cgs_write_register(hwmgr->device, reg, parameter);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
smu10_send_msg_to_smc_without_waiting(hwmgr, msg);
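
The smu10, vega10, and vega12 hunks all make the same substitution: the soc15_get_register_offset() plus cgs_read/write_register() pair becomes the soc15_common.h accessors, which fold the base-index lookup into one macro. The macros expand against an `adev` in scope, which is why every converted function gains the local declaration. Side by side, both forms addressing the same MP1 mailbox register:

struct amdgpu_device *adev = hwmgr->adev;  /* RREG32_SOC15 expands against 'adev' */
uint32_t reg, val;

/* before: two-step offset lookup through the CGS wrapper */
reg = soc15_get_register_offset(MP1_HWID, 0,
                mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
val = cgs_read_register(hwmgr->device, reg);

/* after: one macro folding the base-index lookup into the access */
val = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);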
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 0399c10..64d33b7 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -167,24 +167,25 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
int ret;
- if (!smu7_is_smc_ram_running(hwmgr))
- return -EINVAL;
-
-
PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
- if (ret != 1)
- pr_info("\n failed to send pre message %x ret is %d \n", msg, ret);
+ if (ret == 0xFE)
+ pr_debug("last message was not supported\n");
+ else if (ret != 1)
+ pr_info("\n last message failed, ret is %d\n", ret);
+ cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
- if (ret != 1)
+ if (ret == 0xFE)
+ pr_debug("message %x was not supported\n", msg);
+ else if (ret != 1)
pr_info("\n failed to send message %x ret is %d \n", msg, ret);
return 0;
@@ -199,10 +200,6 @@ int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg)
int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
- if (!smu7_is_smc_ram_running(hwmgr)) {
- return -EINVAL;
- }
-
PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
@@ -231,16 +228,6 @@ int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr)
-{
- if (!smu7_is_smc_ram_running(hwmgr))
- return -EINVAL;
-
- PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
- return 0;
-}
-
-
enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
{
enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
@@ -375,7 +362,7 @@ static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr,
entry->meta_data_addr_low = 0;
/* digest need be excluded out */
- if (cgs_is_virtualization_enabled(hwmgr->device))
+ if (!hwmgr->not_vf)
info.image_size -= 20;
entry->data_size_byte = info.image_size;
entry->num_register_entries = 0;
@@ -409,7 +396,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
0x0);
if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
- if (!cgs_is_virtualization_enabled(hwmgr->device)) {
+ if (hwmgr->not_vf) {
smu7_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SMU_DRAM_ADDR_HI,
upper_32_bits(smu_data->smu_buffer.mc_addr));
@@ -467,7 +454,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- if (cgs_is_virtualization_enabled(hwmgr->device))
+ if (!hwmgr->not_vf)
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
@@ -608,7 +595,7 @@ int smu7_init(struct pp_hwmgr *hwmgr)
smu_data->header = smu_data->header_buffer.kaddr;
smu_data->header_buffer.mc_addr = mc_addr;
- if (cgs_is_virtualization_enabled(hwmgr->device))
+ if (!hwmgr->not_vf)
return 0;
smu_data->smu_buffer.data_size = 200*4096;
@@ -643,13 +630,12 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr)
&smu_data->header_buffer.mc_addr,
&smu_data->header_buffer.kaddr);
- if (!cgs_is_virtualization_enabled(hwmgr->device))
+ if (hwmgr->not_vf)
amdgpu_bo_free_kernel(&smu_data->smu_buffer.handle,
&smu_data->smu_buffer.mc_addr,
&smu_data->smu_buffer.kaddr);
kfree(hwmgr->smu_backend);
hwmgr->smu_backend = NULL;
- cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 126d300..39c9bfd 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -67,7 +67,6 @@ int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg,
int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter);
int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr);
-int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr);
enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type);
int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index c28b60a..ee236df 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -41,6 +41,7 @@ MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
+MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
MODULE_FIRMWARE("amdgpu/vega12_smc.bin");
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index b51d746..782b19f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -55,11 +55,7 @@
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
-
-#define VOLTAGE_SCALE 4
#define POWERTUNE_DEFAULT_SET_MAX 1
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
#define MC_CG_ARB_FREQ_F1 0x0b
#define VDDC_VDDCI_DELTA 200
@@ -199,8 +195,7 @@ static int tonga_start_smu(struct pp_hwmgr *hwmgr)
int result;
/* Only start SMC if SMC RAM is not running */
- if (!(smu7_is_smc_ram_running(hwmgr) ||
- cgs_is_virtualization_enabled(hwmgr->device))) {
+ if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) {
/*Check if SMU is running in protected mode*/
if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_FIRMWARE, SMU_MODE)) {
@@ -651,7 +646,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
graphic_level->PowerThrottle = 0;
data->display_timing.min_clock_in_sr =
- hwmgr->display_config.min_core_set_clock_in_sr;
+ hwmgr->display_config->min_core_set_clock_in_sr;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep))
@@ -957,18 +952,17 @@ static int tonga_populate_single_memory_level(
SMU72_Discrete_MemoryLevel *memory_level
)
{
- uint32_t mvdd = 0;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *pptable_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
- int result = 0;
- bool dll_state_on;
- struct cgs_display_info info = {0};
uint32_t mclk_edc_wr_enable_threshold = 40000;
uint32_t mclk_stutter_mode_threshold = 30000;
uint32_t mclk_edc_enable_threshold = 40000;
uint32_t mclk_strobe_mode_threshold = 40000;
phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL;
+ int result = 0;
+ bool dll_state_on;
+ uint32_t mvdd = 0;
if (hwmgr->od_enabled)
vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk;
@@ -1009,8 +1003,7 @@ static int tonga_populate_single_memory_level(
/* default set to low watermark. Highest level will be set to high later.*/
memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
- cgs_get_active_displays_info(hwmgr->device, &info);
- data->display_timing.num_existing_displays = info.display_count;
+ data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
if ((mclk_stutter_mode_threshold != 0) &&
(memory_clock <= mclk_stutter_mode_threshold) &&
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 4aafb04..e84669c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -23,7 +23,7 @@
#include "smumgr.h"
#include "vega10_inc.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
#include "vega10_smumgr.h"
#include "vega10_hwmgr.h"
#include "vega10_ppsmc.h"
@@ -35,8 +35,6 @@
#define AVFS_EN_MSB 1568
#define AVFS_EN_LSB 1568
-#define VOLTAGE_SCALE 4
-
/* Microcode file is stored in this buffer */
#define BUFFER_SIZE 80000
#define MAX_STRING_SIZE 15
@@ -54,18 +52,13 @@
static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
- uint32_t mp1_fw_flags, reg;
-
- reg = soc15_get_register_offset(NBIF_HWID, 0,
- mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t mp1_fw_flags;
- cgs_write_register(hwmgr->device, reg,
+ WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
- reg = soc15_get_register_offset(NBIF_HWID, 0,
- mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
-
- mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
+ mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
return true;
@@ -81,11 +74,11 @@ static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr)
*/
static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
uint32_t reg;
uint32_t ret;
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+ reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
ret = phm_wait_for_register_unequal(hwmgr, reg,
0, MP1_C2PMSG_90__CONTENT_MASK);
@@ -93,7 +86,7 @@ static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
if (ret)
pr_err("No response from smu\n");
- return cgs_read_register(hwmgr->device, reg);
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
}
/*
@@ -105,11 +98,9 @@ static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
static int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
uint16_t msg)
{
- uint32_t reg;
+ struct amdgpu_device *adev = hwmgr->adev;
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
- cgs_write_register(hwmgr->device, reg, msg);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
return 0;
}
@@ -122,14 +113,12 @@ static int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
*/
static int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
- uint32_t reg;
+ struct amdgpu_device *adev = hwmgr->adev;
uint32_t ret;
vega10_wait_for_response(hwmgr);
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(hwmgr->device, reg, 0);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
@@ -150,18 +139,14 @@ static int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
static int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
- uint32_t reg;
+ struct amdgpu_device *adev = hwmgr->adev;
uint32_t ret;
vega10_wait_for_response(hwmgr);
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(hwmgr->device, reg, 0);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- cgs_write_register(hwmgr->device, reg, parameter);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
@@ -174,12 +159,9 @@ static int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
static int vega10_get_argument(struct pp_hwmgr *hwmgr)
{
- uint32_t reg;
-
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
+ struct amdgpu_device *adev = hwmgr->adev;
- return cgs_read_register(hwmgr->device, reg);
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}
static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 651a3f2..7d9b40e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -23,7 +23,7 @@
#include "smumgr.h"
#include "vega12_inc.h"
-#include "pp_soc15.h"
+#include "soc15_common.h"
#include "vega12_smumgr.h"
#include "vega12_ppsmc.h"
#include "vega12/smu9_driver_if.h"
@@ -44,18 +44,13 @@
static bool vega12_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
- uint32_t mp1_fw_flags, reg;
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t mp1_fw_flags;
- reg = soc15_get_register_offset(NBIF_HWID, 0,
- mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
-
- cgs_write_register(hwmgr->device, reg,
+ WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
- reg = soc15_get_register_offset(NBIF_HWID, 0,
- mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
-
- mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
+ mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
@@ -72,15 +67,15 @@ static bool vega12_is_smc_ram_running(struct pp_hwmgr *hwmgr)
*/
static uint32_t vega12_wait_for_response(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
uint32_t reg;
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
+ reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
phm_wait_for_register_unequal(hwmgr, reg,
0, MP1_C2PMSG_90__CONTENT_MASK);
- return cgs_read_register(hwmgr->device, reg);
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
}
/*
@@ -92,11 +87,9 @@ static uint32_t vega12_wait_for_response(struct pp_hwmgr *hwmgr)
int vega12_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
uint16_t msg)
{
- uint32_t reg;
+ struct amdgpu_device *adev = hwmgr->adev;
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
- cgs_write_register(hwmgr->device, reg, msg);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
return 0;
}
@@ -109,13 +102,11 @@ int vega12_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
*/
int vega12_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
- uint32_t reg;
+ struct amdgpu_device *adev = hwmgr->adev;
vega12_wait_for_response(hwmgr);
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(hwmgr->device, reg, 0);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
@@ -135,17 +126,13 @@ int vega12_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
int vega12_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
- uint32_t reg;
+ struct amdgpu_device *adev = hwmgr->adev;
vega12_wait_for_response(hwmgr);
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(hwmgr->device, reg, 0);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- cgs_write_register(hwmgr->device, reg, parameter);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
@@ -166,11 +153,9 @@ int vega12_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
int vega12_send_msg_to_smc_with_parameter_without_waiting(
struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
- uint32_t reg;
+ struct amdgpu_device *adev = hwmgr->adev;
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
- cgs_write_register(hwmgr->device, reg, parameter);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, parameter);
return vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
}
@@ -183,12 +168,9 @@ int vega12_send_msg_to_smc_with_parameter_without_waiting(
*/
int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
{
- uint32_t reg;
-
- reg = soc15_get_register_offset(MP1_HWID, 0,
- mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
+ struct amdgpu_device *adev = hwmgr->adev;
- *arg = cgs_read_register(hwmgr->device, reg);
+ *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
new file mode 100644
index 0000000..c9a5633
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -0,0 +1,2382 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "pp_debug.h"
+#include "smumgr.h"
+#include "smu_ucode_xfer_vi.h"
+#include "vegam_smumgr.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "gca/gfx_8_0_d.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "ppatomctrl.h"
+#include "cgs_common.h"
+#include "smu7_ppsmc.h"
+
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+
+#include "dce/dce_11_2_d.h"
+#include "dce/dce_11_2_sh_mask.h"
+
+#define PPVEGAM_TARGETACTIVITY_DFLT 50
+
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VDDC_VDDCI_DELTA 200
+#define MC_CG_ARB_FREQ_F1 0x0b
+
+#define STRAP_ASIC_RO_LSB 2168
+#define STRAP_ASIC_RO_MSB 2175
+
+#define PPSMC_MSG_ApplyAvfsCksOffVoltage ((uint16_t) 0x415)
+#define PPSMC_MSG_EnableModeSwitchRLCNotification ((uint16_t) 0x305)
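
VOLTAGE_VID_OFFSET_SCALE1/SCALE2 express the 6.25 mV SMU7-family VID step as the integer ratio 625/100. A sketch of the conversion this enables (assumed usage; the driver spreads the arithmetic across its voltage helpers):

/* VID steps -> millivolts, using 625/100 == 6.25 mV per step */
static uint32_t vid_offset_to_mv(uint32_t vid_steps)
{
        return vid_steps * VOLTAGE_VID_OFFSET_SCALE1 /
               VOLTAGE_VID_OFFSET_SCALE2;
}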
+
+static const struct vegam_pt_defaults
+vegam_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+ /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
+ { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+ { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+ { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
+};
+
+static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
+ {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
+ {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
+
+static int vegam_smu_init(struct pp_hwmgr *hwmgr)
+{
+ struct vegam_smumgr *smu_data;
+
+ smu_data = kzalloc(sizeof(struct vegam_smumgr), GFP_KERNEL);
+ if (smu_data == NULL)
+ return -ENOMEM;
+
+ hwmgr->smu_backend = smu_data;
+
+ if (smu7_init(hwmgr)) {
+ kfree(smu_data);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vegam_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr)
+{
+ int result = 0;
+
+ /* Wait for smc boot up */
+ /* PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */
+
+ /* Assert reset */
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+
+ result = smu7_upload_smu_firmware_image(hwmgr);
+ if (result != 0)
+ return result;
+
+ /* Clear status */
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0);
+
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+
+ /* De-assert reset */
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);
+
+
+ /* Send a test SMU message with 0x20000 offset to trigger SMU start */
+ smu7_send_msg_to_smc_offset(hwmgr);
+
+ /* Wait for the done bit to be set */
+ /* Check the pass/fail indicator */
+
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0);
+
+ if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SMU_STATUS, SMU_PASS))
+ PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0);
+
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+ /* Wait for firmware to initialize */
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
+
+ return result;
+}
+
+static int vegam_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr)
+{
+ int result = 0;
+
+ /* wait for smc boot up */
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0);
+
+ /* Clear firmware interrupt enable flag */
+ /* PHM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixFIRMWARE_FLAGS, 0);
+
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL,
+ rst_reg, 1);
+
+ result = smu7_upload_smu_firmware_image(hwmgr);
+ if (result != 0)
+ return result;
+
+ /* Set smc instruct start point at 0x0 */
+ smu7_program_jump_on_start(hwmgr);
+
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+ /* Wait for firmware to initialize */
+
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
+ FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
+
+ return result;
+}
+
+static int vegam_start_smu(struct pp_hwmgr *hwmgr)
+{
+ int result = 0;
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+
+ /* Only start SMC if SMC RAM is not running */
+ if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) {
+ smu_data->protected_mode = (uint8_t)(PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
+ smu_data->smu7_data.security_hard_key = (uint8_t)(PHM_READ_VFPF_INDIRECT_FIELD(
+ hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
+
+ /* Check if SMU is running in protected mode */
+ if (smu_data->protected_mode == 0)
+ result = vegam_start_smu_in_non_protection_mode(hwmgr);
+ else
+ result = vegam_start_smu_in_protection_mode(hwmgr);
+
+ if (result != 0)
+ PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result);
+ }
+
+	/* Set up SoftRegsStart here for register lookup in case DummyBackEnd
+	 * is used and ProcessFirmwareHeader is not executed.
+	 */
+ smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU75_Firmware_Header, SoftRegisters),
+ &(smu_data->smu7_data.soft_regs_start),
+ 0x40000);
+
+ result = smu7_request_smu_load_fw(hwmgr);
+
+ return result;
+}
+
+static int vegam_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU75_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+	if (!result)
+ smu_data->smu7_data.dpm_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU75_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (!result) {
+ data->soft_regs_start = tmp;
+ smu_data->smu7_data.soft_regs_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU75_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+	if (!result)
+		smu_data->smu7_data.mc_reg_table_start = tmp;
+
+	error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU75_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.fan_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU75_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.arb_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU75_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ hwmgr->microcode_version_info.SMC = tmp;
+
+ error |= (0 != result);
+
+	return error ? -EINVAL : 0;
+}
+
+static bool vegam_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+	return PHM_READ_INDIRECT_FIELD(hwmgr->device,
+			CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
+}
+
+static uint32_t vegam_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU75_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU75_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU75_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU75_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU75_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDGFX:
+ return SMU75_MAX_LEVELS_VDDGFX;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU75_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU75_MAX_LEVELS_MVDD;
+ case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
+ return SMU7_UVD_MCLK_HANDSHAKE_DISABLE |
+ SMU7_VCE_MCLK_HANDSHAKE_DISABLE;
+ }
+
+	pr_warn("can't get the mac definition for 0x%x\n", value);
+ return 0;
+}
+
+static int vegam_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ smu_data->smc_state_table.UvdBootLevel = 0;
+ if (table_info->mm_dep_table->count > 0)
+ smu_data->smc_state_table.UvdBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU75_Discrete_DpmTable,
+ UvdBootLevel);
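+	/*
+	 * The SMC address space is dword-based, so round the byte offset of
+	 * UvdBootLevel down to its containing dword before the
+	 * read-modify-write below; e.g. a byte offset of 0x1E37 becomes
+	 * 0x1E37 / 4 * 4 = 0x1E34.  The 0x00FFFFFF mask and << 24 shift then
+	 * assume the field occupies the top byte of that dword.
+	 */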
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0x00FFFFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ return 0;
+}
+
+static int vegam_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smu_data->smc_state_table.VceBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ else
+ smu_data->smc_state_table.VceBootLevel = 0;
+
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU75_Discrete_DpmTable, VceBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFF00FFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ return 0;
+}
+
+static int vegam_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+ smu_data->smc_state_table.SamuBootLevel = 0;
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
+
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFFFFFF00;
+ mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+ return 0;
+}
+
+static int vegam_update_bif_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+ int max_entry, i;
+
+ max_entry = (SMU75_MAX_LEVELS_LINK < pcie_table->count) ?
+ SMU75_MAX_LEVELS_LINK :
+ pcie_table->count;
+ /* Setup BIF_SCLK levels */
+ for (i = 0; i < max_entry; i++)
+ smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
+ return 0;
+}
+
+static int vegam_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ switch (type) {
+ case SMU_UVD_TABLE:
+ vegam_update_uvd_smc_table(hwmgr);
+ break;
+ case SMU_VCE_TABLE:
+ vegam_update_vce_smc_table(hwmgr);
+ break;
+ case SMU_SAMU_TABLE:
+ vegam_update_samu_smc_table(hwmgr);
+ break;
+ case SMU_BIF_TABLE:
+ vegam_update_bif_smc_table(hwmgr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void vegam_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (table_info &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID)
+ smu_data->power_tune_defaults =
+ &vegam_power_tune_data_set_array
+ [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+ else
+ smu_data->power_tune_defaults = &vegam_power_tune_data_set_array[0];
+}
+
+static int vegam_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU75_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count, level;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ count = data->mvdd_voltage_table.count;
+ if (count > SMU_MAX_SMIO_LEVELS)
+ count = SMU_MAX_SMIO_LEVELS;
+ for (level = 0; level < count; level++) {
+			table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US(
+					data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
+ /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+ table->SmioTable2.Pattern[level].Smio =
+ (uint8_t) level;
+ table->Smio[level] |=
+ data->mvdd_voltage_table.entries[level].smio_low;
+ }
+ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+ table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
+ }
+
+ return 0;
+}
+
+static int vegam_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
+ struct SMU75_Discrete_DpmTable *table)
+{
+ uint32_t count, level;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ count = data->vddci_voltage_table.count;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ if (count > SMU_MAX_SMIO_LEVELS)
+ count = SMU_MAX_SMIO_LEVELS;
+ for (level = 0; level < count; ++level) {
+ table->SmioTable1.Pattern[level].Voltage = PP_HOST_TO_SMC_US(
+ data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
+ table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
+
+ table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
+ }
+ }
+
+ table->SmioMask1 = data->vddci_voltage_table.mask_low;
+
+ return 0;
+}
+
+static int vegam_populate_cac_table(struct pp_hwmgr *hwmgr,
+ struct SMU75_Discrete_DpmTable *table)
+{
+ uint32_t count;
+ uint8_t index;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+ table_info->vddc_lookup_table;
+	/* The table is already byte-swapped, so to use a value from it we
+	 * need to swap it back.
+	 * We populate the vddc CAC data into the BapmVddc table for both
+	 * split and merged mode.
+	 */
+ for (count = 0; count < lookup_table->count; count++) {
+ index = phm_get_voltage_index(lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddcVidLoSidd[count] =
+ convert_to_vid(lookup_table->entries[index].us_cac_low);
+ table->BapmVddcVidHiSidd[count] =
+ convert_to_vid(lookup_table->entries[index].us_cac_mid);
+ table->BapmVddcVidHiSidd2[count] =
+ convert_to_vid(lookup_table->entries[index].us_cac_high);
+ }
+
+ return 0;
+}
+
+static int vegam_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ struct SMU75_Discrete_DpmTable *table)
+{
+ vegam_populate_smc_vddci_table(hwmgr, table);
+ vegam_populate_smc_mvdd_table(hwmgr, table);
+ vegam_populate_cac_table(hwmgr, table);
+
+ return 0;
+}
+
+static int vegam_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU75_Discrete_Ulv *state)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
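+	/*
+	 * The ULV offset is programmed twice: once in raw millivolts and once
+	 * as a VID step count.  Assuming the polaris-era scale factors
+	 * (VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 = 100 / 625),
+	 * one VID step corresponds to 6.25 mV, so a 50 mV offset encodes as 8.
+	 */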
+ state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+ state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+ state->VddcPhase = data->vddc_phase_shed_control ^ 0x3;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+ return 0;
+}
+
+static int vegam_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ struct SMU75_Discrete_DpmTable *table)
+{
+ return vegam_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int vegam_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+ struct SMU75_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct vegam_smumgr *smu_data =
+ (struct vegam_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int i;
+
+ /* Index (dpm_table->pcie_speed_table.count)
+ * is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+ dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+
+	/* TODO: move to hwmgr */
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+ uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+ uint32_t i;
+ uint16_t vddci;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ *voltage = *mvdd = 0;
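+	/*
+	 * *voltage is a packed dword: VDDC and VDDCI are stored in
+	 * VOLTAGE_SCALE units at VDDC_SHIFT/VDDCI_SHIFT, and the phase-shed
+	 * count lands at PHASES_SHIFT.  This is a sketch of the layout
+	 * implied by the shifts used below; the exact field widths are
+	 * defined by the SMU75 headers.
+	 */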
+
+	/* the clock-voltage dependency table is empty */
+ if (dep_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < dep_table->count; i++) {
+		/* find the first sclk at or above the requested clock */
+ if (dep_table->entries[i].clk >= clock) {
+ *voltage |= (dep_table->entries[i].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i].vddci)
+ *voltage |= (dep_table->entries[i].vddci *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i].vddc -
+ (uint16_t)VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+ else if (dep_table->entries[i].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i].mvdd *
+ VOLTAGE_SCALE;
+
+ *voltage |= 1 << PHASES_SHIFT;
+ return 0;
+ }
+ }
+
+	/* the requested sclk is above the max sclk in the dependency table */
+ *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i - 1].vddc -
+ (uint16_t)VDDC_VDDCI_DELTA));
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+	else if (dep_table->entries[i - 1].vddci)
+		*voltage |= (dep_table->entries[i - 1].vddci *
+				VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+	else if (dep_table->entries[i - 1].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+ return 0;
+}
+
+static void vegam_get_sclk_range_table(struct pp_hwmgr *hwmgr,
+ SMU75_Discrete_DpmTable *table)
+{
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+ uint32_t i, ref_clk;
+
+ struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
+
+ ref_clk = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
+
+ if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+ table->SclkFcwRangeTable[i].vco_setting =
+ range_table_from_vbios.entry[i].ucVco_setting;
+ table->SclkFcwRangeTable[i].postdiv =
+ range_table_from_vbios.entry[i].ucPostdiv;
+ table->SclkFcwRangeTable[i].fcw_pcc =
+ range_table_from_vbios.entry[i].usFcw_pcc;
+
+ table->SclkFcwRangeTable[i].fcw_trans_upper =
+ range_table_from_vbios.entry[i].usFcw_trans_upper;
+ table->SclkFcwRangeTable[i].fcw_trans_lower =
+ range_table_from_vbios.entry[i].usRcw_trans_lower;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+ }
+ return;
+ }
+
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
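+		/*
+		 * trans_frequency = (ref_clk * fcw) >> postdiv.  A worked
+		 * example, assuming xclk is reported in 10 kHz units
+		 * (ref_clk = 2500 for 25 MHz) and the first DIV_BY_16 row
+		 * with fcw_trans_lower = 112 and a postdiv shift of 4:
+		 * (2500 * 112) >> 4 = 17500, i.e. 175 MHz.
+		 */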
+ smu_data->range_table[i].trans_lower_frequency =
+ (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
+ smu_data->range_table[i].trans_upper_frequency =
+ (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
+
+ table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
+ table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
+ table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
+
+ table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
+ table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+ }
+}
+
+static int vegam_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, SMU_SclkSetting *sclk_setting)
+{
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+ const SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct pp_atomctrl_clock_dividers_ai dividers;
+ uint32_t ref_clock;
+ uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
+ uint8_t i;
+ int result;
+ uint64_t temp;
+
+ sclk_setting->SclkFrequency = clock;
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
+ if (result == 0) {
+ sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
+ sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
+ sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
+ sclk_setting->PllRange = dividers.ucSclkPllRange;
+ sclk_setting->Sclk_slew_rate = 0x400;
+ sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
+ sclk_setting->Pcc_down_slew_rate = 0xffff;
+ sclk_setting->SSc_En = dividers.ucSscEnable;
+ sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
+ sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
+ sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
+ return result;
+ }
+
+ ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
+
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+ if (clock > smu_data->range_table[i].trans_lower_frequency
+ && clock <= smu_data->range_table[i].trans_upper_frequency) {
+ sclk_setting->PllRange = i;
+ break;
+ }
+ }
+
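+	/*
+	 * Fcw below is a 16.16 fixed-point ratio of (clock << postdiv) to
+	 * ref_clock.  A worked example, assuming 10 kHz units with
+	 * ref_clock = 2500, clock = 30100 and a postdiv shift of 2:
+	 * 30100 << 2 = 120400, 120400 / 2500 = 48 for Fcw_int, and the
+	 * remainder scaled by 2^16 gives Fcw_frac = 0x28F5 (about 0.16).
+	 */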
+ sclk_setting->Fcw_int = (uint16_t)
+ ((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) /
+ ref_clock);
+ temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+ temp <<= 0x10;
+ do_div(temp, ref_clock);
+ sclk_setting->Fcw_frac = temp & 0xffff;
+
+ pcc_target_percent = 10; /* Hardcode 10% for now. */
+ pcc_target_freq = clock - (clock * pcc_target_percent / 100);
+ sclk_setting->Pcc_fcw_int = (uint16_t)
+ ((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) /
+ ref_clock);
+
+ ss_target_percent = 2; /* Hardcode 2% for now. */
+ sclk_setting->SSc_En = 0;
+ if (ss_target_percent) {
+ sclk_setting->SSc_En = 1;
+ ss_target_freq = clock - (clock * ss_target_percent / 100);
+ sclk_setting->Fcw1_int = (uint16_t)
+ ((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) /
+ ref_clock);
+ temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+ temp <<= 0x10;
+ do_div(temp, ref_clock);
+ sclk_setting->Fcw1_frac = temp & 0xffff;
+ }
+
+ return 0;
+}
+
+static uint8_t vegam_get_sleep_divider_id_from_clock(uint32_t clock,
+ uint32_t clock_insr)
+{
+ uint8_t i;
+ uint32_t temp;
+ uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
+
+ PP_ASSERT_WITH_CODE((clock >= min),
+ "Engine clock can't satisfy stutter requirement!",
+ return 0);
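+	/*
+	 * Walk down from the largest divider and keep the first one that
+	 * still holds clock / (i + 1) at or above the deep-sleep floor;
+	 * e.g. clock = 60000 with min = 10000 returns i = 5 (divide by 6).
+	 */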
+ for (i = 31; ; i--) {
+ temp = clock / (i + 1);
+
+ if (temp >= min || i == 0)
+ break;
+ }
+ return i;
+}
+
+static int vegam_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU75_Discrete_GraphicsLevel *level)
+{
+ int result;
+ /* PP_Clocks minClocks; */
+ uint32_t mvdd;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ SMU_SclkSetting curr_sclk_setting = { 0 };
+
+ result = vegam_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
+
+ /* populate graphics levels */
+ result = vegam_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk, clock,
+ &level->MinVoltage, &mvdd);
+
+	PP_ASSERT_WITH_CODE((0 == result),
+			"cannot find VDDC voltage value in the "
+			"VDDC/engine clock dependency table",
+			return result);
+ level->ActivityLevel = (uint16_t)(SclkDPMTuning_VEGAM >> DPMTuning_Activity_Shift);
+
+ level->CcPwrDynRm = 0;
+ level->CcPwrDynRm1 = 0;
+ level->EnabledForActivity = 0;
+ level->EnabledForThrottle = 1;
+ level->VoltageDownHyst = 0;
+ level->PowerThrottle = 0;
+ data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+ level->DeepSleepDivId = vegam_get_sleep_divider_id_from_clock(clock,
+ hwmgr->display_config->min_core_set_clock_in_sr);
+
+ level->SclkSetting = curr_sclk_setting;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
+ return 0;
+}
+
+static int vegam_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+ uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
+ int result = 0;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU75_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU75_Discrete_GraphicsLevel) *
+ SMU75_MAX_LEVELS_GRAPHICS;
+ struct SMU75_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t i, max_entry;
+	uint8_t highest_pcie_level_enabled = 0,
+		lowest_pcie_level_enabled = 0,
+		mid_pcie_level_enabled = 0,
+		count = 0;
+
+ vegam_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+
+ result = vegam_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ &(smu_data->smc_state_table.GraphicsLevel[i]));
+ if (result)
+ return result;
+
+ levels[i].UpHyst = (uint8_t)
+ (SclkDPMTuning_VEGAM >> DPMTuning_Uphyst_Shift);
+ levels[i].DownHyst = (uint8_t)
+ (SclkDPMTuning_VEGAM >> DPMTuning_Downhyst_Shift);
+		/* Make sure only DPM levels 0-1 have DeepSleepDivId populated. */
+ if (i > 1)
+ levels[i].DeepSleepDivId = 0;
+ }
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SPLLShutdownSupport))
+ smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
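+	/*
+	 * Each bit of sclk_dpm_enable_mask marks one DPM level as enabled;
+	 * e.g. a mask of 0x7 sets EnabledForActivity on levels 0-2 and
+	 * clears it on the rest.
+	 */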
+ for (i = 0; i < dpm_table->sclk_table.count; i++)
+ levels[i].EnabledForActivity =
+ (hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask >> i) & 0x1;
+
+ if (pcie_table != NULL) {
+ PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+ max_entry = pcie_entry_cnt - 1;
+ for (i = 0; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel =
+ (uint8_t) ((i < max_entry) ? i : max_entry);
+ } else {
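+		/*
+		 * Derive the lowest/highest/mid PCIe levels from the enable
+		 * mask.  A sketch with pcie_dpm_enable_mask = 0x3E (levels
+		 * 1-5 enabled): lowest = 1, highest = 5, mid = 2; sclk level
+		 * 0 then rides the lowest PCIe level, level 1 the mid one,
+		 * and everything above the highest.
+		 */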
+		while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+				((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+						(1 << (highest_pcie_level_enabled + 1))) != 0))
+			highest_pcie_level_enabled++;
+
+		while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+				((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+						(1 << lowest_pcie_level_enabled)) == 0))
+			lowest_pcie_level_enabled++;
+
+		while ((count < highest_pcie_level_enabled) &&
+				((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+						(1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
+			count++;
+
+		mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
+				highest_pcie_level_enabled ?
+				(lowest_pcie_level_enabled + 1 + count) :
+				highest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to highest_pcie_level_enabled */
+		for (i = 2; i < dpm_table->sclk_table.count; i++)
+			levels[i].pcieDpmLevel = highest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to lowest_pcie_level_enabled */
+		levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+		/* set pcieDpmLevel to mid_pcie_level_enabled */
+		levels[1].pcieDpmLevel = mid_pcie_level_enabled;
+ }
+	/* The level count is sent to the SMC once at init and never changes. */
+ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
+static int vegam_calculate_mclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU75_Discrete_MemoryLevel *mem_level)
+{
+ struct pp_atomctrl_memory_clock_param_ai mpll_param;
+
+ PP_ASSERT_WITH_CODE(!atomctrl_get_memory_pll_dividers_ai(hwmgr,
+ clock, &mpll_param),
+ "Failed to retrieve memory pll parameter.",
+ return -EINVAL);
+
+ mem_level->MclkFrequency = (uint32_t)mpll_param.ulClock;
+ mem_level->Fcw_int = (uint16_t)mpll_param.ulMclk_fcw_int;
+ mem_level->Fcw_frac = (uint16_t)mpll_param.ulMclk_fcw_frac;
+ mem_level->Postdiv = (uint8_t)mpll_param.ulPostDiv;
+
+ return 0;
+}
+
+static int vegam_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU75_Discrete_MemoryLevel *mem_level)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int result = 0;
+ uint32_t mclk_stutter_mode_threshold = 60000;
+
+ if (table_info->vdd_dep_on_mclk) {
+ result = vegam_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk, clock,
+ &mem_level->MinVoltage, &mem_level->MinMvdd);
+		PP_ASSERT_WITH_CODE(!result,
+				"cannot find MinVddc voltage value in the memory "
+				"VDDC voltage dependency table", return result);
+ }
+
+ result = vegam_calculate_mclk_params(hwmgr, clock, mem_level);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to calculate mclk params.",
+ return -EINVAL);
+
+ mem_level->EnabledForThrottle = 1;
+ mem_level->EnabledForActivity = 0;
+ mem_level->VoltageDownHyst = 0;
+ mem_level->ActivityLevel = (uint16_t)
+ (MemoryDPMTuning_VEGAM >> DPMTuning_Activity_Shift);
+ mem_level->StutterEnable = false;
+ mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
+
+ if (mclk_stutter_mode_threshold &&
+ (clock <= mclk_stutter_mode_threshold) &&
+ (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE) & 0x1))
+ mem_level->StutterEnable = true;
+
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(mem_level->Fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(mem_level->Fcw_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+ }
+
+ return result;
+}
+
+static int vegam_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
+ int result;
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU75_Discrete_DpmTable, MemoryLevel);
+ uint32_t array_size = sizeof(SMU75_Discrete_MemoryLevel) *
+ SMU75_MAX_LEVELS_MEMORY;
+ struct SMU75_Discrete_MemoryLevel *levels =
+ smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+				"cannot populate memory level: memory clock is zero",
+				return -EINVAL);
+ result = vegam_populate_single_memory_level(hwmgr,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &levels[i]);
+
+ if (result)
+ return result;
+
+ levels[i].UpHyst = (uint8_t)
+ (MemoryDPMTuning_VEGAM >> DPMTuning_Uphyst_Shift);
+ levels[i].DownHyst = (uint8_t)
+ (MemoryDPMTuning_VEGAM >> DPMTuning_Downhyst_Shift);
+ }
+
+ smu_data->smc_state_table.MemoryDpmLevelCount =
+ (uint8_t)dpm_table->mclk_table.count;
+ hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++)
+ levels[i].EnabledForActivity =
+ (hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask >> i) & 0x1;
+
+ levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+	/* The level count is sent to the SMC once at init and never changes. */
+ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
+static int vegam_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+ uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+		/* find the first mvdd entry whose clock is at or above the request */
+ for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+ if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+ smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+ PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+ "MVDD Voltage is outside the supported range.",
+ return -EINVAL);
+	} else {
+		return -EINVAL;
+	}
+
+ return 0;
+}
+
+static int vegam_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU75_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint32_t sclk_frequency;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ SMIO_Pattern vol_level;
+ uint32_t mvdd;
+ uint16_t us_mvdd;
+
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ /* Get MinVoltage and Frequency from DPM0,
+ * already converted to SMC_UL */
+ sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
+ result = vegam_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk,
+ sclk_frequency,
+ &table->ACPILevel.MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE(!result,
+ "Cannot find ACPI VDDC voltage value "
+ "in Clock Dependency Table",
+ );
+
+ result = vegam_calculate_sclk_params(hwmgr, sclk_frequency,
+ &(table->ACPILevel.SclkSetting));
+ PP_ASSERT_WITH_CODE(!result,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ table->ACPILevel.DeepSleepDivId = 0;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
+
+ /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+ table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
+ result = vegam_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk,
+ table->MemoryACPILevel.MclkFrequency,
+ &table->MemoryACPILevel.MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDCI voltage value "
+ "in Clock Dependency Table",
+ );
+
+ us_mvdd = 0;
+	if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+			(data->mclk_dpm_key_disabled)) {
+		us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
+	} else {
+		if (!vegam_populate_mvdd_value(hwmgr,
+				data->dpm_table.mclk_table.dpm_levels[0].value,
+				&vol_level))
+			us_mvdd = vol_level.Voltage;
+	}
+
+ if (!vegam_populate_mvdd_value(hwmgr, 0, &vol_level))
+ table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
+
+ table->MemoryACPILevel.StutterEnable = false;
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ table->MemoryACPILevel.ActivityLevel =
+ PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
+
+ return result;
+}
+
+static int vegam_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU75_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->VceLevelCount = (uint8_t)(mm_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage = 0;
+ table->VceLevel[count].MinVoltage |=
+ (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ table->VceLevel[count].MinVoltage |=
+ (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"cannot find divider id for the VCE engine clock",
+				return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int vegam_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU75_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ /* not sure whether we need evclk or not */
+ table->SamuLevel[count].MinVoltage = 0;
+ table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+ table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"cannot find divider id for the SAMU clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int vegam_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+ int32_t eng_clock, int32_t mem_clock,
+ SMU75_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+ uint32_t dram_timing;
+ uint32_t dram_timing2;
+ uint32_t burst_time;
+ uint32_t rfsh_rate;
+ uint32_t misc3;
+
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ eng_clock, mem_clock);
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.",
+ return result);
+
+ dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burst_time = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
+ rfsh_rate = cgs_read_register(hwmgr->device, mmMC_ARB_RFSH_RATE);
+ misc3 = cgs_read_register(hwmgr->device, mmMC_ARB_MISC3);
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+ arb_regs->McArbBurstTime = PP_HOST_TO_SMC_UL(burst_time);
+ arb_regs->McArbRfshRate = PP_HOST_TO_SMC_UL(rfsh_rate);
+ arb_regs->McArbMisc3 = PP_HOST_TO_SMC_UL(misc3);
+
+ return 0;
+}
+
+static int vegam_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+ struct SMU75_Discrete_MCArbDramTimingTable arb_regs = {0};
+ uint32_t i, j;
+ int result = 0;
+
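+	/*
+	 * The ARB timing table is an sclk x mclk matrix: one register set is
+	 * captured per (engine clock, memory clock) pair so the SMC can
+	 * switch DRAM timings whenever either DPM level changes.
+	 */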
+ for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
+ result = vegam_populate_memory_timing_parameters(hwmgr,
+ hw_data->dpm_table.sclk_table.dpm_levels[i].value,
+ hw_data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+ if (result)
+ return result;
+ }
+ }
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU75_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END);
+ return result;
+}
+
+static int vegam_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ struct SMU75_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->UvdLevelCount = (uint8_t)(mm_table->count);
+ table->UvdBootLevel = 0;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].MinVoltage = 0;
+ table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+ table->UvdLevel[count].MinVoltage |=
+ (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+		/* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].VclkFrequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"cannot find divider id for the Vclk clock", return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+		PP_ASSERT_WITH_CODE((0 == result),
+				"cannot find divider id for the Dclk clock", return result);
+
+ table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+ }
+
+ return result;
+}
+
+static int vegam_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ struct SMU75_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table */
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(table->GraphicsBootLevel));
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(table->MemoryBootLevel));
+
+ table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+ return 0;
+}
+
+static int vegam_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint8_t count, level;
+
+ count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
+
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+ hw_data->vbios_boot_state.sclk_bootup_value) {
+ smu_data->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+ hw_data->vbios_boot_state.mclk_bootup_value) {
+ smu_data->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+
+ return 0;
+}
+
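+/*
+ * Map a 0-100 percent fan-gain setting onto the 0-4096 fixed-point scale the
+ * SMC consumes (a 4.12 representation, judging by the scale factor); e.g. a
+ * raw setting of 25 becomes 25 * 4096 / 100 = 1024.
+ */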
+static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
+{
+	uint32_t tmp;
+
+	tmp = raw_setting * 4096 / 100;
+ return (uint16_t)tmp;
+}
+
+static int vegam_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+
+ const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+ struct pp_advance_fan_control_parameters *fan_table =
+ &hwmgr->thermal_controller.advanceFanControlParameters;
+ int i, j, k;
+ const uint16_t *pdef1;
+ const uint16_t *pdef2;
+
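+	/*
+	 * The * 128 and * 256 factors below are fixed-point conversions:
+	 * TDP appears to be carried in 1/128 W units and the temperature
+	 * limits in 8.8 format (1/256 degree steps); an assumption read off
+	 * the scale factors, not confirmed by the SMU75 headers.
+	 */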
+ table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+ table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+
+ PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+ "Target Operating Temp is out of Range!",
+ );
+
+ table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTargetOperatingTemp * 256);
+ table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitHotspot * 256);
+ table->FanGainEdge = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainEdge));
+ table->FanGainHotspot = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainHotspot));
+
+ pdef1 = defaults->BAPMTI_R;
+ pdef2 = defaults->BAPMTI_RC;
+
+ for (i = 0; i < SMU75_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU75_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU75_DTE_SINKS; k++) {
+ table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
+ table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
+ pdef1++;
+ pdef2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int vegam_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+ uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
+ struct vegam_smumgr *smu_data =
+ (struct vegam_smumgr *)(hwmgr->smu_backend);
+
+ uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+ uint32_t mask = (1 << ((STRAP_ASIC_RO_MSB - STRAP_ASIC_RO_LSB) + 1)) - 1;
+
+ stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+ atomctrl_read_efuse(hwmgr, STRAP_ASIC_RO_LSB, STRAP_ASIC_RO_MSB,
+ mask, &efuse);
+
+ min = 1200;
+ max = 2500;
+
+ ro = efuse * (max - min) / 255 + min;
+
+ /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+ for (i = 0; i < sclk_table->count; i++) {
+ smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+ sclk_table->entries[i].cks_enable << i;
+ volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) *
+ 136418 - (ro - 70) * 1000000) /
+ (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
+ volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 *
+ 3232 - (ro - 65) * 1000000) /
+ (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
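+		/*
+		 * The CKS offset below is stored in 6.25 mV VID steps:
+		 * (delta * 100 + 624) / 625 rounds up, assuming the fitted
+		 * formulas above yield millivolts; e.g. a 51 mV delta becomes
+		 * (5100 + 624) / 625 = 9 steps.
+		 */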
+
+ if (volt_without_cks >= volt_with_cks)
+ volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+ sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
+
+ smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+ }
+
+ smu_data->smc_state_table.LdoRefSel =
+ (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ?
+ table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 5;
+ /* Populate CKS Lookup Table */
+ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+ stretch_amount2 = 0;
+ else if (stretch_amount == 3 || stretch_amount == 4)
+ stretch_amount2 = 1;
+ else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+ PP_ASSERT_WITH_CODE(false,
+ "Stretch Amount in PPTable not supported\n",
+ return -EINVAL);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
+ value &= 0xFFFFFFFE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
+
+ return 0;
+}
+
+static bool vegam_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
+{
+ uint32_t efuse;
+
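+	/* Bit 0 of efuse dword 49 appears to flag a fused-on hardware AVFS
+	 * block; an assumption read off the offset and mask used below.
+	 */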
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (49 * 4));
+	return (efuse & 0x00000001) != 0;
+}
+
+static int vegam_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+
+ SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ int result = 0;
+ struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
+ AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
+ AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
+ uint32_t tmp, i;
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+ if (!hwmgr->avfs_supported)
+ return 0;
+
+ result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
+
+ if (0 == result) {
+ table->BTCGB_VDROOP_TABLE[0].a0 =
+ PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
+ table->BTCGB_VDROOP_TABLE[0].a1 =
+ PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
+ table->BTCGB_VDROOP_TABLE[0].a2 =
+ PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
+ table->BTCGB_VDROOP_TABLE[1].a0 =
+ PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
+ table->BTCGB_VDROOP_TABLE[1].a1 =
+ PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
+ table->BTCGB_VDROOP_TABLE[1].a2 =
+ PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
+ table->AVFSGB_FUSE_TABLE[0].m1 =
+ PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
+ table->AVFSGB_FUSE_TABLE[0].m2 =
+ PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
+ table->AVFSGB_FUSE_TABLE[0].b =
+ PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
+ table->AVFSGB_FUSE_TABLE[0].m1_shift = 24;
+ table->AVFSGB_FUSE_TABLE[0].m2_shift = 12;
+ table->AVFSGB_FUSE_TABLE[1].m1 =
+ PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+ table->AVFSGB_FUSE_TABLE[1].m2 =
+ PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+ table->AVFSGB_FUSE_TABLE[1].b =
+ PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+ table->AVFSGB_FUSE_TABLE[1].m1_shift = 24;
+ table->AVFSGB_FUSE_TABLE[1].m2_shift = 12;
+ table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
+ AVFS_meanNsigma.Aconstant[0] =
+ PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
+ AVFS_meanNsigma.Aconstant[1] =
+ PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
+ AVFS_meanNsigma.Aconstant[2] =
+ PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
+ AVFS_meanNsigma.DC_tol_sigma =
+ PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
+ AVFS_meanNsigma.Platform_mean =
+ PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
+ AVFS_meanNsigma.PSM_Age_CompFactor =
+ PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
+ AVFS_meanNsigma.Platform_sigma =
+ PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
+
+ for (i = 0; i < sclk_table->count; i++) {
+ AVFS_meanNsigma.Static_Voltage_Offset[i] =
+ (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
+ AVFS_SclkOffset.Sclk_Offset[i] =
+ PP_HOST_TO_SMC_US((uint16_t)
+ (sclk_table->entries[i].sclk_offset) / 100);
+ }
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU75_Firmware_Header, AvfsMeanNSigma),
+ &tmp, SMC_RAM_END);
+ smu7_copy_bytes_to_smc(hwmgr,
+ tmp,
+ (uint8_t *)&AVFS_meanNsigma,
+ sizeof(AVFS_meanNsigma_t),
+ SMC_RAM_END);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU75_Firmware_Header, AvfsSclkOffsetTable),
+ &tmp, SMC_RAM_END);
+ smu7_copy_bytes_to_smc(hwmgr,
+ tmp,
+ (uint8_t *)&AVFS_SclkOffset,
+ sizeof(AVFS_Sclk_Offset_t),
+ SMC_RAM_END);
+
+ data->avfs_vdroop_override_setting =
+ (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
+ data->apply_avfs_cks_off_voltage =
+			avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1;
+ }
+ return result;
+}
+
+static int vegam_populate_vr_config(struct pp_hwmgr *hwmgr,
+ struct SMU75_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct vegam_smumgr *smu_data =
+ (struct vegam_smumgr *)(hwmgr->smu_backend);
+ uint16_t config;
+
+ config = VR_MERGED_WITH_VDDC;
+ table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+ /* Set Vddc Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= config;
+ } else {
+ PP_ASSERT_WITH_CODE(false,
+ "VDDC should be on SVI2 control in merged mode!",
+ );
+ }
+ /* Set Vddci Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ config = VR_SVI2_PLANE_2; /* only in merged mode */
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ config = VR_SMIO_PATTERN_1;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ }
+ /* Set Mvdd Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+ if (config != VR_SVI2_PLANE_2) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC,
+ smu_data->smu7_data.soft_regs_start +
+ offsetof(SMU75_SoftRegisters, AllowMvddSwitch),
+ 0x1);
+ } else {
+ PP_ASSERT_WITH_CODE(false,
+ "SVI2 Plane 2 is already taken, set MVDD as Static",);
+ config = VR_STATIC_VOLTAGE;
+			table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ }
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ config = VR_SMIO_PATTERN_2;
+		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC,
+ smu_data->smu7_data.soft_regs_start +
+ offsetof(SMU75_SoftRegisters, AllowMvddSwitch),
+ 0x1);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ }
+
+ return 0;
+}
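
VRConfig is a single 32-bit word with a byte-wide regulator code per rail, which is why each branch above shifts its code by the rail's VRCONF_*_SHIFT and ORs it in. A standalone sketch of the packing; the shift offsets and field width are assumptions for illustration, the real values live in the SMU75 headers:

#include <stdint.h>

#define VRCONF_VDDC_SHIFT   0  /* illustrative offsets */
#define VRCONF_VDDCI_SHIFT  8
#define VRCONF_VDDGFX_SHIFT 16
#define VRCONF_MVDD_SHIFT   24

/* One byte of VRConfig selects the regulator type for each power rail. */
static uint32_t pack_vr_config(uint8_t vddc, uint8_t vddci,
			       uint8_t vddgfx, uint8_t mvdd)
{
	return ((uint32_t)vddc << VRCONF_VDDC_SHIFT) |
	       ((uint32_t)vddci << VRCONF_VDDCI_SHIFT) |
	       ((uint32_t)vddgfx << VRCONF_VDDGFX_SHIFT) |
	       ((uint32_t)mvdd << VRCONF_MVDD_SHIFT);
}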
+
+static int vegam_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+ const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int vegam_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+ return 0;
+}
+
+static int vegam_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+ const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr,
+ fuse_table_offset +
+ offsetof(SMU75_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+				"Attempt to read PmFuses table (TdcWaterfallCtl) from SMC Failed!",
+ return -EINVAL);
+ else {
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+ smu_data->power_tune_table.LPMLTemperatureMin =
+ (uint8_t)((temp >> 16) & 0xff);
+ smu_data->power_tune_table.LPMLTemperatureMax =
+ (uint8_t)((temp >> 8) & 0xff);
+ smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+ }
+ return 0;
+}
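
The single SRAM dword read above carries three byte-wide fields that land in LPMLTemperatureMin/Max and the reserved byte. A standalone sketch of the unpacking:

#include <stdint.h>

struct lpml_temps {
	uint8_t min;      /* bits 23:16 of the fuse dword */
	uint8_t max;      /* bits 15:8 */
	uint8_t reserved; /* bits 7:0 */
};

static struct lpml_temps unpack_lpml_temps(uint32_t dword)
{
	struct lpml_temps t = {
		.min      = (dword >> 16) & 0xff,
		.max      = (dword >> 8) & 0xff,
		.reserved = dword & 0xff,
	};
	return t;
}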
+
+static int vegam_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+ return 0;
+}
+
+static int vegam_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+
+	/* TODO: move to hwmgr */
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
+ || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
+ return 0;
+}
+
+static int vegam_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int vegam_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+ hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+ return 0;
+}
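
The / 100 * 256 above turns the pptable leakage value into the unsigned Q8.8 fixed point the SMC expects; because this is integer arithmetic the divide runs first, so sub-unit precision is intentionally dropped. A one-function sketch, assuming the pptable stores hundredths:

#include <stdint.h>

/* e.g. 250 (2.50 in pptable units) -> 2 * 256 = 0x0200 in Q8.8 */
static uint16_t leakage_to_q8_8(uint16_t hundredths)
{
	return (uint16_t)(hundredths / 100 * 256);
}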
+
+static int vegam_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+ uint32_t pm_fuse_table_offset;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU75_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed!",
+ return -EINVAL);
+
+ if (vegam_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed!",
+ return -EINVAL);
+
+ if (vegam_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed!", return -EINVAL);
+
+ if (vegam_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl, "
+ "LPMLTemperature Min and Max Failed!",
+ return -EINVAL);
+
+ if (0 != vegam_populate_temperature_scaler(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed!",
+ return -EINVAL);
+
+ if (vegam_populate_fuzzy_fan(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate Fuzzy Fan Control parameters Failed!",
+ return -EINVAL);
+
+ if (vegam_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed!",
+ return -EINVAL);
+
+ if (vegam_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
+ "Sidd Failed!", return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ (sizeof(struct SMU75_Discrete_PmFuses) - PMFUSES_AVFSSIZE),
+ SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed!",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+static int vegam_enable_reconfig_cus(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableModeSwitchRLCNotification,
+ adev->gfx.cu_info.number);
+
+ return 0;
+}
+
+static int vegam_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ uint8_t i;
+ struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+ struct phm_ppt_v1_gpio_table *gpio_table =
+ (struct phm_ppt_v1_gpio_table *)table_info->gpio_table;
+ pp_atomctrl_clock_dividers_vi dividers;
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+
+ vegam_initialize_power_tune_defaults(hwmgr);
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
+ vegam_populate_smc_voltage_tables(hwmgr, table);
+
+ table->SystemFlags = 0;
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (hw_data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
+ result = vegam_populate_ulv_state(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize ULV state!", return result);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
+ }
+
+ result = vegam_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Link Level!", return result);
+
+ result = vegam_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Graphics Level!", return result);
+
+ result = vegam_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Memory Level!", return result);
+
+ result = vegam_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize ACPI Level!", return result);
+
+ result = vegam_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize VCE Level!", return result);
+
+ result = vegam_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize SAMU Level!", return result);
+
+ /* Since only the initial state is completely set up at this point
+ * (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+ */
+ result = vegam_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to Write ARB settings for the initial state.", return result);
+
+ result = vegam_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize UVD Level!", return result);
+
+ result = vegam_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Boot Level!", return result);
+
+ result = vegam_populate_smc_initial_state(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Boot State!", return result);
+
+ result = vegam_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate BAPM Parameters!", return result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ result = vegam_populate_clock_stretcher_data_table(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate Clock Stretcher Data Table!",
+ return result);
+ }
+
+ result = vegam_populate_avfs_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate AVFS Parameters!", return result;);
+
+ table->CurrSclkPllRange = 0xff;
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh =
+ table_info->cac_dtp_table->usTargetOperatingTemp *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->TemperatureLimitLow =
+ (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+
+ PP_ASSERT_WITH_CODE(hw_data->dpm_table.pcie_speed_table.count >= 1,
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+ table->PCIeBootLinkLevel =
+ hw_data->dpm_table.pcie_speed_table.count;
+ table->PCIeGenInterval = 1;
+ table->VRConfig = 0;
+
+ result = vegam_populate_vr_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate VRConfig setting!", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ if (atomctrl_get_pp_assign_pin(hwmgr,
+ VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+ table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ if (gpio_table)
+ table->VRHotLevel =
+ table_info->gpio_table->vrhot_triggered_sclk_dpm_index;
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr,
+ PP_AC_DC_SWITCH_GPIO_PINID, &gpio_pin)) {
+ table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition) &&
+ !smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme))
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
+ } else {
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ }
+
+ /* Thermal Output GPIO */
+ if (atomctrl_get_pp_assign_pin(hwmgr,
+ THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin)) {
+ table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
+
+		/* For polarity, read GPIOPAD_A with the assigned GPIO pin
+ * since VBIOS will program this register to set 'inactive state',
+ * driver can then determine 'active state' from this and
+ * program SMU with correct polarity
+ */
+ table->ThermOutPolarity =
+ (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
+ (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+ /* if required, combine VRHot/PCC with thermal out GPIO */
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot) &&
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CombinePCCWithThermalSignal))
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+ } else {
+ table->ThermOutGpio = 17;
+ table->ThermOutPolarity = 1;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+ }
+
+ /* Populate BIF_SCLK levels into SMC DPM table */
+ for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ smu_data->bif_sclk_table[i], &dividers);
+ PP_ASSERT_WITH_CODE(!result,
+ "Can not find DFS divide id for Sclk",
+ return result);
+
+ if (i == 0)
+ table->Ulv.BifSclkDfs =
+ PP_HOST_TO_SMC_US((uint16_t)(dividers.pll_post_divider));
+ else
+ table->LinkLevel[i - 1].BifSclkDfs =
+ PP_HOST_TO_SMC_US((uint16_t)(dividers.pll_post_divider));
+ }
+
+ for (i = 0; i < SMU75_MAX_ENTRIES_SMIO; i++)
+ table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+ /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+ result = smu7_copy_bytes_to_smc(hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU75_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU75_Discrete_DpmTable) - 3 * sizeof(SMU75_PIDController),
+ SMC_RAM_END);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to upload dpm data to SMC memory!", return result);
+
+ result = vegam_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate PM fuses to SMC memory!", return result);
+
+ result = vegam_enable_reconfig_cus(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to enable reconfigurable CUs!", return result);
+
+ return 0;
+}
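
The thermal limits a few lines up use the same fixed-point convention: multiplying whole degrees by SMU7_Q88_FORMAT_CONVERSION_UNIT yields a Q8.8 value. A small sketch, assuming the conversion unit is 256:

#include <stdint.h>

#define Q88_UNIT 256 /* assumed value of SMU7_Q88_FORMAT_CONVERSION_UNIT */

/* A 90 C target gives high = 0x5a00 (90.0) and low = 0x5900 (89.0). */
static void q88_temp_limits(uint16_t target_c, uint16_t *high, uint16_t *low)
{
	*high = target_c * Q88_UNIT;
	*low = (uint16_t)((target_c - 1) * Q88_UNIT);
}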
+
+static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU75_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU75_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU75_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU75_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU75_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU75_SoftRegisters, UcodeLoadStatus);
+ case DRAM_LOG_ADDR_H:
+ return offsetof(SMU75_SoftRegisters, DRAM_LOG_ADDR_H);
+ case DRAM_LOG_ADDR_L:
+ return offsetof(SMU75_SoftRegisters, DRAM_LOG_ADDR_L);
+ case DRAM_LOG_PHY_ADDR_H:
+ return offsetof(SMU75_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+ case DRAM_LOG_PHY_ADDR_L:
+ return offsetof(SMU75_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU75_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+		}
+		break;
+	case SMU_Discrete_DpmTable:
+ switch (member) {
+ case UvdBootLevel:
+ return offsetof(SMU75_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU75_Discrete_DpmTable, VceBootLevel);
+ case SamuBootLevel:
+ return offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ }
+ pr_warn("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+}
+
+static int vegam_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+			(DPMTABLE_OD_UPDATE_SCLK |
+			 DPMTABLE_UPDATE_SCLK |
+			 DPMTABLE_UPDATE_MCLK))
+ return vegam_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+static int vegam_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct vegam_smumgr *smu_data =
+ (struct vegam_smumgr *)(hwmgr->smu_backend);
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (data->low_sclk_interrupt_threshold != 0)) {
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU75_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to update SCLK threshold!", return result);
+
+ result = vegam_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters!",
+ );
+
+ return result;
+}
+
+int vegam_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int ret;
+
+ if (!hwmgr->avfs_supported)
+ return 0;
+
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+ if (!ret) {
+ if (data->apply_avfs_cks_off_voltage)
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+ }
+
+ return ret;
+}
+
+static int vegam_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ PP_ASSERT_WITH_CODE(hwmgr->thermal_controller.fanInfo.bNoFan,
+ "VBIOS fan info is not correct!",
+ );
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+}
+
+const struct pp_smumgr_func vegam_smu_funcs = {
+ .smu_init = vegam_smu_init,
+ .smu_fini = smu7_smu_fini,
+ .start_smu = vegam_start_smu,
+ .check_fw_load_finish = smu7_check_fw_load_finish,
+ .request_smu_load_fw = smu7_reload_firmware,
+ .request_smu_load_specific_fw = NULL,
+ .send_msg_to_smc = smu7_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
+ .process_firmware_header = vegam_process_firmware_header,
+ .is_dpm_running = vegam_is_dpm_running,
+ .get_mac_definition = vegam_get_mac_definition,
+ .update_smc_table = vegam_update_smc_table,
+ .init_smc_table = vegam_init_smc_table,
+ .get_offsetof = vegam_get_offsetof,
+ .populate_all_graphic_levels = vegam_populate_all_graphic_levels,
+ .populate_all_memory_levels = vegam_populate_all_memory_levels,
+ .update_sclk_threshold = vegam_update_sclk_threshold,
+ .is_hw_avfs_present = vegam_is_hw_avfs_present,
+ .thermal_avfs_enable = vegam_thermal_avfs_enable,
+ .thermal_setup_fan_table = vegam_thermal_setup_fan_table,
+};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h
new file mode 100644
index 0000000..2b65582
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _VEGAM_SMUMANAGER_H
+#define _VEGAM_SMUMANAGER_H
+
+
+#include <pp_endian.h>
+#include "smu75_discrete.h"
+#include "smu7_smumgr.h"
+
+#define SMC_RAM_END 0x40000
+
+#define DPMTuning_Uphyst_Shift 0
+#define DPMTuning_Downhyst_Shift 8
+#define DPMTuning_Activity_Shift 16
+
+#define GraphicsDPMTuning_VEGAM 0x001e6400
+#define MemoryDPMTuning_VEGAM 0x000f3c0a
+#define SclkDPMTuning_VEGAM 0x002d000a
+#define MclkDPMTuning_VEGAM 0x001f100a
+
+
+struct vegam_pt_defaults {
+ uint8_t SviLoadLineEn;
+ uint8_t SviLoadLineVddC;
+ uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+ uint8_t TDC_MAWt;
+ uint8_t TdcWaterfallCtl;
+ uint8_t DTEAmbientTempBase;
+
+ uint32_t DisplayCac;
+ uint32_t BAPM_TEMP_GRADIENT;
+ uint16_t BAPMTI_R[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS];
+ uint16_t BAPMTI_RC[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS];
+};
+
+struct vegam_range_table {
+ uint32_t trans_lower_frequency; /* in 10khz */
+ uint32_t trans_upper_frequency;
+};
+
+struct vegam_smumgr {
+ struct smu7_smumgr smu7_data;
+ uint8_t protected_mode;
+ SMU75_Discrete_DpmTable smc_state_table;
+ struct SMU75_Discrete_Ulv ulv_setting;
+ struct SMU75_Discrete_PmFuses power_tune_table;
+ struct vegam_range_table range_table[NUM_SCLK_RANGE];
+ const struct vegam_pt_defaults *power_tune_defaults;
+ uint32_t bif_sclk_table[SMU75_MAX_LEVELS_LINK];
+};
+
+
+#endif
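
The DPMTuning_*_Shift values above pack up-hysteresis, down-hysteresis and an activity threshold into one tuning word; assuming 8-bit fields, the graphics constant decodes as below:

#include <stdint.h>
#include <stdio.h>

#define DPMTuning_Uphyst_Shift   0
#define DPMTuning_Downhyst_Shift 8
#define DPMTuning_Activity_Shift 16

#define GraphicsDPMTuning_VEGAM 0x001e6400

int main(void)
{
	uint32_t t = GraphicsDPMTuning_VEGAM;

	/* Prints: uphyst=0 downhyst=100 activity=30 */
	printf("uphyst=%u downhyst=%u activity=%u\n",
	       (t >> DPMTuning_Uphyst_Shift) & 0xff,
	       (t >> DPMTuning_Downhyst_Shift) & 0xff,
	       (t >> DPMTuning_Activity_Shift) & 0xff);
	return 0;
}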
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index ab50090..23e73c2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -116,7 +116,7 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
drm_sched_entity_init(&gpu->sched,
&ctx->sched_entity[i],
&gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
- 32, NULL);
+ NULL);
}
}
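
These etnaviv and v3d hunks adapt callers to the scheduler API change: drm_sched_entity_init() dropped its job-queue depth argument (the literal 32). A sketch of an updated call site; the helper name is illustrative:

#include <drm/gpu_scheduler.h>

/* Bind an entity to a scheduler's NORMAL run queue under the new API. */
static int mydrv_entity_init(struct drm_gpu_scheduler *sched,
			     struct drm_sched_entity *entity)
{
	/* The queue-size argument is gone; pass the rq and an optional
	 * guilty pointer only. */
	return drm_sched_entity_init(sched, entity,
			&sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL],
			NULL);
}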
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 02baaaf..efbd581 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1145,7 +1145,6 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_framebuffer *radeon_fb;
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct radeon_bo *rbo;
@@ -1164,19 +1163,15 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
return 0;
}
- if (atomic) {
- radeon_fb = to_radeon_framebuffer(fb);
+ if (atomic)
target_fb = fb;
- }
- else {
- radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+ else
target_fb = crtc->primary->fb;
- }
/* If atomic, assume fb object is pinned & idle & fenced and
* just update base pointers
*/
- obj = radeon_fb->obj;
+ obj = target_fb->obj[0];
rbo = gem_to_radeon_bo(obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
@@ -1441,8 +1436,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
- radeon_fb = to_radeon_framebuffer(fb);
- rbo = gem_to_radeon_bo(radeon_fb->obj);
+ rbo = gem_to_radeon_bo(fb->obj[0]);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -1463,7 +1457,6 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_framebuffer *radeon_fb;
struct drm_gem_object *obj;
struct radeon_bo *rbo;
struct drm_framebuffer *target_fb;
@@ -1481,16 +1474,12 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
return 0;
}
- if (atomic) {
- radeon_fb = to_radeon_framebuffer(fb);
+ if (atomic)
target_fb = fb;
- }
- else {
- radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+ else
target_fb = crtc->primary->fb;
- }
- obj = radeon_fb->obj;
+ obj = target_fb->obj[0];
rbo = gem_to_radeon_bo(obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
@@ -1641,8 +1630,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
if (!atomic && fb && fb != crtc->primary->fb) {
- radeon_fb = to_radeon_framebuffer(fb);
- rbo = gem_to_radeon_bo(radeon_fb->obj);
+ rbo = gem_to_radeon_bo(fb->obj[0]);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -2149,11 +2137,9 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
int r;
- struct radeon_framebuffer *radeon_fb;
struct radeon_bo *rbo;
- radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
- rbo = gem_to_radeon_bo(radeon_fb->obj);
+ rbo = gem_to_radeon_bo(crtc->primary->fb->obj[0]);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r))
DRM_ERROR("failed to reserve rbo before unpin\n");
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 40be406..fa5fada 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -526,7 +526,7 @@ static int radeon_atpx_init(void)
* look up whether we are the integrated or discrete GPU (all asics).
* Returns the client id.
*/
-static int radeon_atpx_get_client_id(struct pci_dev *pdev)
+static enum vga_switcheroo_client_id radeon_atpx_get_client_id(struct pci_dev *pdev)
{
if (radeon_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
return VGA_SWITCHEROO_IGD;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index df9469a..2aea2bd 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -852,7 +852,7 @@ static int radeon_lvds_get_modes(struct drm_connector *connector)
return ret;
}
-static int radeon_lvds_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
@@ -1012,7 +1012,7 @@ static int radeon_vga_get_modes(struct drm_connector *connector)
return ret;
}
-static int radeon_vga_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_vga_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
@@ -1156,7 +1156,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
return 1;
}
-static int radeon_tv_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_tv_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
if ((mode->hdisplay > 1024) || (mode->vdisplay > 768))
@@ -1498,7 +1498,7 @@ static void radeon_dvi_force(struct drm_connector *connector)
radeon_connector->use_digital = true;
}
-static int radeon_dvi_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_dvi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
@@ -1800,7 +1800,7 @@ out:
return ret;
}
-static int radeon_dp_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 48d0e6b..59c8a66 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1591,7 +1591,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
/* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
+ struct drm_framebuffer *fb = crtc->primary->fb;
struct radeon_bo *robj;
if (radeon_crtc->cursor_bo) {
@@ -1603,10 +1603,10 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
}
}
- if (rfb == NULL || rfb->obj == NULL) {
+ if (fb == NULL || fb->obj[0] == NULL) {
continue;
}
- robj = gem_to_radeon_bo(rfb->obj);
+ robj = gem_to_radeon_bo(fb->obj[0]);
/* don't unpin kernel fb objects */
if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
r = radeon_bo_reserve(robj, false);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 26129b2..9d3ac8b 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -32,6 +32,7 @@
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_edid.h>
@@ -478,8 +479,6 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_framebuffer *old_radeon_fb;
- struct radeon_framebuffer *new_radeon_fb;
struct drm_gem_object *obj;
struct radeon_flip_work *work;
struct radeon_bo *new_rbo;
@@ -501,15 +500,13 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
/* schedule unpin of the old buffer */
- old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
- obj = old_radeon_fb->obj;
+ obj = crtc->primary->fb->obj[0];
/* take a reference to the old object */
drm_gem_object_get(obj);
work->old_rbo = gem_to_radeon_bo(obj);
- new_radeon_fb = to_radeon_framebuffer(fb);
- obj = new_radeon_fb->obj;
+ obj = fb->obj[0];
new_rbo = gem_to_radeon_bo(obj);
/* pin the new buffer */
@@ -1285,41 +1282,23 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
}
-static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
-
- drm_gem_object_put_unlocked(radeon_fb->obj);
- drm_framebuffer_cleanup(fb);
- kfree(radeon_fb);
-}
-
-static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
-
- return drm_gem_handle_create(file_priv, radeon_fb->obj, handle);
-}
-
static const struct drm_framebuffer_funcs radeon_fb_funcs = {
- .destroy = radeon_user_framebuffer_destroy,
- .create_handle = radeon_user_framebuffer_create_handle,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
};
int
radeon_framebuffer_init(struct drm_device *dev,
- struct radeon_framebuffer *rfb,
+ struct drm_framebuffer *fb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
- rfb->obj = obj;
- drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
- ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
+ fb->obj[0] = obj;
+ drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ ret = drm_framebuffer_init(dev, fb, &radeon_fb_funcs);
if (ret) {
- rfb->obj = NULL;
+ fb->obj[0] = NULL;
return ret;
}
return 0;
@@ -1331,7 +1310,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
- struct radeon_framebuffer *radeon_fb;
+ struct drm_framebuffer *fb;
int ret;
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
@@ -1347,20 +1326,20 @@ radeon_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
- radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
- if (radeon_fb == NULL) {
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (fb == NULL) {
drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
- ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+ ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj);
if (ret) {
- kfree(radeon_fb);
+ kfree(fb);
drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
- return &radeon_fb->base;
+ return fb;
}
static const struct drm_mode_config_funcs radeon_mode_funcs = {
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index b28288a..2a7977a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -168,7 +168,12 @@ int radeon_no_wb;
int radeon_modeset = -1;
int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
+#ifdef __powerpc__
+/* Default to PCI on PowerPC (fdo #95017) */
+int radeon_agpmode = -1;
+#else
int radeon_agpmode = 0;
+#endif
int radeon_vram_limit = 0;
int radeon_gart_size = -1; /* auto */
int radeon_benchmarking = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 57c5404..11790340 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -43,7 +43,7 @@
*/
struct radeon_fbdev {
struct drm_fb_helper helper;
- struct radeon_framebuffer rfb;
+ struct drm_framebuffer fb;
struct radeon_device *rdev;
};
@@ -246,13 +246,13 @@ static int radeonfb_create(struct drm_fb_helper *helper,
info->par = rfbdev;
- ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
+ ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->fb, &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
goto out;
}
- fb = &rfbdev->rfb.base;
+ fb = &rfbdev->fb;
/* setup helper */
rfbdev->helper.fb = fb;
@@ -308,15 +308,15 @@ out:
static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
{
- struct radeon_framebuffer *rfb = &rfbdev->rfb;
+ struct drm_framebuffer *fb = &rfbdev->fb;
drm_fb_helper_unregister_fbi(&rfbdev->helper);
- if (rfb->obj) {
- radeonfb_destroy_pinned_object(rfb->obj);
- rfb->obj = NULL;
- drm_framebuffer_unregister_private(&rfb->base);
- drm_framebuffer_cleanup(&rfb->base);
+ if (fb->obj[0]) {
+ radeonfb_destroy_pinned_object(fb->obj[0]);
+ fb->obj[0] = NULL;
+ drm_framebuffer_unregister_private(fb);
+ drm_framebuffer_cleanup(fb);
}
drm_fb_helper_fini(&rfbdev->helper);
@@ -400,7 +400,7 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
if (!rdev->mode_info.rfbdev)
return false;
- if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
+ if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->fb.obj[0]))
return true;
return false;
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 1f1856e..35a205a 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -374,7 +374,6 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_framebuffer *radeon_fb;
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct radeon_bo *rbo;
@@ -393,14 +392,10 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
return 0;
}
- if (atomic) {
- radeon_fb = to_radeon_framebuffer(fb);
+ if (atomic)
target_fb = fb;
- }
- else {
- radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+ else
target_fb = crtc->primary->fb;
- }
switch (target_fb->format->cpp[0] * 8) {
case 8:
@@ -423,7 +418,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
}
	/* Pin framebuffer & get tiling information */
- obj = radeon_fb->obj;
+ obj = target_fb->obj[0];
rbo = gem_to_radeon_bo(obj);
retry:
r = radeon_bo_reserve(rbo, false);
@@ -451,7 +446,7 @@ retry:
struct radeon_bo *old_rbo;
unsigned long nsize, osize;
- old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj);
+ old_rbo = gem_to_radeon_bo(fb->obj[0]);
osize = radeon_bo_size(old_rbo);
nsize = radeon_bo_size(rbo);
if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) {
@@ -558,8 +553,7 @@ retry:
WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
if (!atomic && fb && fb != crtc->primary->fb) {
- radeon_fb = to_radeon_framebuffer(fb);
- rbo = gem_to_radeon_bo(radeon_fb->obj);
+ rbo = gem_to_radeon_bo(fb->obj[0]);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -1093,11 +1087,9 @@ static void radeon_crtc_disable(struct drm_crtc *crtc)
radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
int r;
- struct radeon_framebuffer *radeon_fb;
struct radeon_bo *rbo;
- radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
- rbo = gem_to_radeon_bo(radeon_fb->obj);
+ rbo = gem_to_radeon_bo(crtc->primary->fb->obj[0]);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r))
DRM_ERROR("failed to reserve rbo before unpin\n");
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 3243e5e..fd470d6 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -46,7 +46,6 @@ struct radeon_device;
#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
#define to_radeon_connector(x) container_of(x, struct radeon_connector, base)
#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
-#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
#define RADEON_MAX_HPD_PINS 7
#define RADEON_MAX_CRTCS 6
@@ -574,11 +573,6 @@ struct radeon_connector {
int enabled_attribs;
};
-struct radeon_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
-};
-
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
((em) == ATOM_ENCODER_MODE_DP_MST))
@@ -932,7 +926,7 @@ radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
extern void
radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
int radeon_framebuffer_init(struct drm_device *dev,
- struct radeon_framebuffer *rfb,
+ struct drm_framebuffer *rfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 0d95888..a364fc0 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -30,7 +30,7 @@
#include <drm/spsc_queue.h>
#define CREATE_TRACE_POINTS
-#include <drm/gpu_scheduler_trace.h>
+#include "gpu_scheduler_trace.h"
#define to_drm_sched_job(sched_job) \
container_of((sched_job), struct drm_sched_job, queue_node)
@@ -117,15 +117,15 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
* @sched The pointer to the scheduler
* @entity The pointer to a valid drm_sched_entity
* @rq The run queue this entity belongs
- * @kernel If this is an entity for the kernel
- * @jobs The max number of jobs in the job queue
+ * @guilty atomic_t set to 1 when a job on this queue
+ * is found to be guilty causing a timeout
*
* return 0 if succeed. negative error code on failure
*/
int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity,
struct drm_sched_rq *rq,
- uint32_t jobs, atomic_t *guilty)
+ atomic_t *guilty)
{
if (!(sched && entity && rq))
return -EINVAL;
@@ -135,6 +135,8 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
entity->rq = rq;
entity->sched = sched;
entity->guilty = guilty;
+ entity->fini_status = 0;
+ entity->last_scheduled = NULL;
spin_lock_init(&entity->rq_lock);
spin_lock_init(&entity->queue_lock);
@@ -196,19 +198,30 @@ static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
return true;
}
+static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
+ struct dma_fence_cb *cb)
+{
+ struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
+ finish_cb);
+ drm_sched_fence_finished(job->s_fence);
+ WARN_ON(job->s_fence->parent);
+ dma_fence_put(&job->s_fence->finished);
+ job->sched->ops->free_job(job);
+}
+
+
/**
* Destroy a context entity
*
* @sched Pointer to scheduler instance
* @entity The pointer to a valid scheduler entity
*
- * Cleanup and free the allocated resources.
+ * drm_sched_entity_fini() is split into two functions. The first one does the waiting,
+ * removes the entity from the runqueue and returns an error when the process was killed.
*/
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity)
{
- int r;
-
if (!drm_sched_entity_is_initialized(sched, entity))
return;
/**
@@ -216,13 +229,28 @@ void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
* queued IBs or discard them on SIGKILL
*/
if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
- r = -ERESTARTSYS;
+ entity->fini_status = -ERESTARTSYS;
else
- r = wait_event_killable(sched->job_scheduled,
+ entity->fini_status = wait_event_killable(sched->job_scheduled,
drm_sched_entity_is_idle(entity));
drm_sched_entity_set_rq(entity, NULL);
- if (r) {
+}
+EXPORT_SYMBOL(drm_sched_entity_do_release);
+
+/**
+ * Destroy a context entity
+ *
+ * @sched Pointer to scheduler instance
+ * @entity The pointer to a valid scheduler entity
+ *
+ * The second one then goes over the entity and signals all jobs with an error code.
+ */
+void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity)
+{
+ if (entity->fini_status) {
struct drm_sched_job *job;
+ int r;
/* Park the kernel for a moment to make sure it isn't processing
		 * our entity.
@@ -240,12 +268,25 @@ void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
struct drm_sched_fence *s_fence = job->s_fence;
drm_sched_fence_scheduled(s_fence);
dma_fence_set_error(&s_fence->finished, -ESRCH);
- drm_sched_fence_finished(s_fence);
- WARN_ON(s_fence->parent);
- dma_fence_put(&s_fence->finished);
- sched->ops->free_job(job);
+ r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb,
+ drm_sched_entity_kill_jobs_cb);
+ if (r == -ENOENT)
+ drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+ else if (r)
+ DRM_ERROR("fence add callback failed (%d)\n", r);
}
}
+
+ dma_fence_put(entity->last_scheduled);
+ entity->last_scheduled = NULL;
+}
+EXPORT_SYMBOL(drm_sched_entity_cleanup);
+
+void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity)
+{
+ drm_sched_entity_do_release(sched, entity);
+ drm_sched_entity_cleanup(sched, entity);
}
EXPORT_SYMBOL(drm_sched_entity_fini);
@@ -360,6 +401,9 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
if (entity->guilty && atomic_read(entity->guilty))
dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
+ dma_fence_put(entity->last_scheduled);
+ entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);
+
spsc_queue_pop(&entity->job_queue);
return sched_job;
}
@@ -529,6 +573,7 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
spin_unlock(&sched->job_list_lock);
fence = sched->ops->run_job(s_job);
atomic_inc(&sched->hw_rq_count);
+
if (fence) {
s_fence->parent = dma_fence_get(fence);
r = dma_fence_add_callback(fence, &s_fence->cb,
@@ -555,6 +600,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
void *owner)
{
job->sched = sched;
+ job->entity = entity;
job->s_priority = entity->rq - sched->sched_rq;
job->s_fence = drm_sched_fence_create(entity, owner);
if (!job->s_fence)
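
With this split, drm_sched_entity_fini() is just the two stages run back to back, so a driver that needs to do work between the idle wait and the final job flush can call them separately; a sketch:

#include <drm/gpu_scheduler.h>

static void mydrv_entity_teardown(struct drm_gpu_scheduler *sched,
				  struct drm_sched_entity *entity)
{
	/* Stage 1: wait for the entity to idle and remove it from the rq. */
	drm_sched_entity_do_release(sched, entity);

	/* ...driver-specific teardown may run here... */

	/* Stage 2: signal or kill whatever jobs are still queued. */
	drm_sched_entity_cleanup(sched, entity);
}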
diff --git a/include/drm/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index 0789e8d..4998ad9 100644
--- a/include/drm/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -78,5 +78,5 @@ TRACE_EVENT(drm_sched_process_job,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/scheduler
#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 7c2485f..ea4d59e 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 98e06f8..5d8688e52 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -1175,7 +1176,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
reservation_object_init(&bo->ttm_resv);
atomic_inc(&bo->bdev->glob->bo_count);
drm_vma_node_reset(&bo->vma_node);
- bo->priority = 0;
/*
* For ttm_bo_type_device buffers, allocate
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index a7c232d..18d3deb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2ebbae6..f2c1677 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
@@ -39,6 +40,11 @@
#include <linux/module.h>
#include <linux/reservation.h>
+struct ttm_transfer_obj {
+ struct ttm_buffer_object base;
+ struct ttm_buffer_object *bo;
+};
+
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
ttm_bo_mem_put(bo, &bo->mem);
@@ -454,7 +460,11 @@ EXPORT_SYMBOL(ttm_bo_move_memcpy);
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
- kfree(bo);
+ struct ttm_transfer_obj *fbo;
+
+ fbo = container_of(bo, struct ttm_transfer_obj, base);
+ ttm_bo_unref(&fbo->bo);
+ kfree(fbo);
}
/**
@@ -475,14 +485,15 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
struct ttm_buffer_object **new_obj)
{
- struct ttm_buffer_object *fbo;
+ struct ttm_transfer_obj *fbo;
int ret;
fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
if (!fbo)
return -ENOMEM;
- *fbo = *bo;
+ fbo->base = *bo;
+ fbo->bo = ttm_bo_reference(bo);
/**
* Fix up members that we shouldn't copy directly:
@@ -490,25 +501,25 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
*/
atomic_inc(&bo->bdev->glob->bo_count);
- INIT_LIST_HEAD(&fbo->ddestroy);
- INIT_LIST_HEAD(&fbo->lru);
- INIT_LIST_HEAD(&fbo->swap);
- INIT_LIST_HEAD(&fbo->io_reserve_lru);
- mutex_init(&fbo->wu_mutex);
- fbo->moving = NULL;
- drm_vma_node_reset(&fbo->vma_node);
- atomic_set(&fbo->cpu_writers, 0);
-
- kref_init(&fbo->list_kref);
- kref_init(&fbo->kref);
- fbo->destroy = &ttm_transfered_destroy;
- fbo->acc_size = 0;
- fbo->resv = &fbo->ttm_resv;
- reservation_object_init(fbo->resv);
- ret = reservation_object_trylock(fbo->resv);
+ INIT_LIST_HEAD(&fbo->base.ddestroy);
+ INIT_LIST_HEAD(&fbo->base.lru);
+ INIT_LIST_HEAD(&fbo->base.swap);
+ INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
+ mutex_init(&fbo->base.wu_mutex);
+ fbo->base.moving = NULL;
+ drm_vma_node_reset(&fbo->base.vma_node);
+ atomic_set(&fbo->base.cpu_writers, 0);
+
+ kref_init(&fbo->base.list_kref);
+ kref_init(&fbo->base.kref);
+ fbo->base.destroy = &ttm_transfered_destroy;
+ fbo->base.acc_size = 0;
+ fbo->base.resv = &fbo->base.ttm_resv;
+ reservation_object_init(fbo->base.resv);
+ ret = reservation_object_trylock(fbo->base.resv);
WARN_ON(!ret);
- *new_obj = fbo;
+ *new_obj = &fbo->base;
return 0;
}
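
The ghost-object fix works because the wrapper embeds the base BO as a member, so the destroy callback can recover the wrapper from the base pointer and drop the reference taken at transfer time. The container_of pattern in isolation, outside the kernel:

#include <stddef.h>

/* Classic container_of, mirroring the kernel macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_obj { int refcount; };

struct transfer_obj {
	struct base_obj base;  /* embedded; this is what callers see */
	struct base_obj *orig; /* reference held until destroy */
};

/* destroy() receives &t->base but can still reach the wrapper. */
static struct transfer_obj *to_transfer_obj(struct base_obj *bo)
{
	return container_of(bo, struct transfer_obj, base);
}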
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 8eba95b..c7ece76 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 3dca206e..e73ae0d 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index 913f431..20694b8 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 27856c5..450387c 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 66fc639..6ff40c0 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 1aa2baa..74f1b1e 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index f0481b7..06c94e3 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -910,7 +910,8 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
while (npages >= HPAGE_PMD_NR) {
gfp_t huge_flags = gfp_flags;
- huge_flags |= GFP_TRANSHUGE;
+ huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+ __GFP_KSWAPD_RECLAIM;
huge_flags &= ~__GFP_MOVABLE;
huge_flags &= ~__GFP_COMP;
p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
@@ -1027,11 +1028,15 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
GFP_USER | GFP_DMA32, "uc dma", 0);
ttm_page_pool_init_locked(&_manager->wc_pool_huge,
- GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
+ (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+ __GFP_KSWAPD_RECLAIM) &
+ ~(__GFP_MOVABLE | __GFP_COMP),
"wc huge", order);
ttm_page_pool_init_locked(&_manager->uc_pool_huge,
- GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP)
+ (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+ __GFP_KSWAPD_RECLAIM) &
+ ~(__GFP_MOVABLE | __GFP_COMP)
, "uc huge", order);
_manager->options.max_size = max_pages;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 8a25d19..f63d99c 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -910,7 +910,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
gfp_flags |= __GFP_ZERO;
if (huge) {
- gfp_flags |= GFP_TRANSHUGE;
+ gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+ __GFP_KSWAPD_RECLAIM;
gfp_flags &= ~__GFP_MOVABLE;
gfp_flags &= ~__GFP_COMP;
}
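
Both pool allocators now build the same huge-page mask: start from GFP_TRANSHUGE_LIGHT (which drops the reclaim bits of GFP_TRANSHUGE), re-allow kswapd wakeups, forbid retries, and clear the bits TTM cannot tolerate. A sketch of the composition, with the rationale as comments; the helper name is illustrative:

#include <linux/gfp.h>

static gfp_t ttm_huge_pool_gfp(gfp_t base)
{
	gfp_t flags = base | GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
		      __GFP_KSWAPD_RECLAIM;

	flags &= ~__GFP_MOVABLE; /* pool pages are pinned, not migratable */
	flags &= ~__GFP_COMP;    /* TTM hands out individual pages */
	return flags;
}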
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 7e672be..a1e5439 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 38e8041..cdb5820 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -135,7 +135,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
drm_sched_entity_init(&v3d->queue[i].sched,
&v3d_priv->sched_entity[i],
&v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
- 32, NULL);
+ NULL);
}
file->driver_priv = v3d_priv;
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index 6c731c5..695bde7 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -44,6 +44,7 @@ enum amd_asic_type {
CHIP_POLARIS10,
CHIP_POLARIS11,
CHIP_POLARIS12,
+ CHIP_VEGAM,
CHIP_VEGA10,
CHIP_VEGA12,
CHIP_RAVEN,
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index dfd54fb..5238006 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -43,10 +43,12 @@ enum drm_sched_priority {
};
/**
- * A scheduler entity is a wrapper around a job queue or a group
- * of other entities. Entities take turns emitting jobs from their
- * job queues to corresponding hardware ring based on scheduling
- * policy.
+ * drm_sched_entity - A wrapper around a job queue (typically attached
+ * to the DRM file_priv).
+ *
+ * Entities will emit jobs in order to their corresponding hardware
+ * ring, and the scheduler will alternate between entities based on
+ * scheduling policy.
*/
struct drm_sched_entity {
struct list_head list;
@@ -63,6 +65,8 @@ struct drm_sched_entity {
struct dma_fence *dependency;
struct dma_fence_cb cb;
atomic_t *guilty; /* points to ctx's guilty */
+ int fini_status;
+ struct dma_fence *last_scheduled;
};
/**
@@ -78,7 +82,18 @@ struct drm_sched_rq {
struct drm_sched_fence {
struct dma_fence scheduled;
+
+ /* This fence is what will be signaled by the scheduler when
+ * the job is completed.
+ *
+ * When setting up an out fence for the job, you should use
+ * this, since it's available immediately upon
+ * drm_sched_job_init(), and the fence returned by the driver
+ * from run_job() won't be created until the dependencies have
+ * resolved.
+ */
struct dma_fence finished;
+
struct dma_fence_cb cb;
struct dma_fence *parent;
struct drm_gpu_scheduler *sched;
@@ -88,6 +103,13 @@ struct drm_sched_fence {
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
+/**
+ * drm_sched_job - A job to be run by an entity.
+ *
+ * A job is created by the driver using drm_sched_job_init(), and
+ * should call drm_sched_entity_push_job() once it wants the scheduler
+ * to schedule the job.
+ */
struct drm_sched_job {
struct spsc_node queue_node;
struct drm_gpu_scheduler *sched;
@@ -99,6 +121,7 @@ struct drm_sched_job {
uint64_t id;
atomic_t karma;
enum drm_sched_priority s_priority;
+ struct drm_sched_entity *entity;
};
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
@@ -112,10 +135,28 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
* these functions should be implemented on the driver side
*/
struct drm_sched_backend_ops {
+ /* Called when the scheduler is considering scheduling this
+ * job next, to get another struct dma_fence for this job to
+ * block on. Once it returns NULL, run_job() may be called.
+ */
struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity);
+
+ /* Called to execute the job once all of the dependencies have
+ * been resolved. This may be called multiple times, if
+ * timedout_job() has happened and drm_sched_job_recovery()
+ * decides to try it again.
+ */
struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
+
+ /* Called when a job has taken too long to execute, to trigger
+ * GPU recovery.
+ */
void (*timedout_job)(struct drm_sched_job *sched_job);
+
+ /* Called once the job's finished fence has been signaled and
+ * it's time to clean it up.
+ */
void (*free_job)(struct drm_sched_job *sched_job);
};
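
Taken together, the four hooks describe a backend shaped like the following; a minimal sketch in which every foo_* name is hypothetical, not code from this merge:

struct foo_job {
	struct drm_sched_job base;
	/* driver-specific submission state */
};

#define to_foo_job(j) container_of((j), struct foo_job, base)

static struct dma_fence *foo_dependency(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity)
{
	return NULL;	/* no extra dependencies; run_job() may proceed */
}

static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
{
	/* Kick the hardware ring; the scheduler signals
	 * s_fence->finished once the returned fence completes. */
	return foo_hw_submit(to_foo_job(sched_job));	/* hypothetical */
}

static void foo_timedout_job(struct drm_sched_job *sched_job)
{
	foo_gpu_reset(sched_job->sched);	/* hypothetical recovery */
}

static void foo_free_job(struct drm_sched_job *sched_job)
{
	kfree(to_foo_job(sched_job));
}

static const struct drm_sched_backend_ops foo_sched_ops = {
	.dependency	= foo_dependency,
	.run_job	= foo_run_job,
	.timedout_job	= foo_timedout_job,
	.free_job	= foo_free_job,
};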
@@ -147,7 +188,11 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity,
struct drm_sched_rq *rq,
- uint32_t jobs, atomic_t *guilty);
+ atomic_t *guilty);
+void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity);
+void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity);
void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
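
The teardown path is now split in two; the presumed ordering, inferred from the declarations above and the amdgpu callers earlier in this merge, looks like:

/* Split form: stop the entity and wait for queued jobs to drain, ... */
drm_sched_entity_do_release(sched, entity);
/* ... any driver work between draining and freeing goes here ... */
drm_sched_entity_cleanup(sched, entity);

/* Or the one-shot form, presumably equivalent to both calls when
 * nothing needs to happen in between: */
drm_sched_entity_fini(sched, entity);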
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index c363b67..78b4dd8 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -78,6 +78,12 @@ extern "C" {
#define AMDGPU_GEM_DOMAIN_GDS 0x8
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
+#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
+ AMDGPU_GEM_DOMAIN_GTT | \
+ AMDGPU_GEM_DOMAIN_VRAM | \
+ AMDGPU_GEM_DOMAIN_GDS | \
+ AMDGPU_GEM_DOMAIN_GWS | \
+ AMDGPU_GEM_DOMAIN_OA)
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
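
The AMDGPU_GEM_DOMAIN_MASK added above lends itself to input validation on the GEM-create path; one plausible check, illustrative rather than quoted from this merge:

/* Reject requests naming any placement domain the kernel
 * does not know about. */
if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
	return -EINVAL;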
@@ -95,6 +101,10 @@ extern "C" {
#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
/* Flag that BO sharing will be explicitly synchronized */
#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC (1 << 7)
+/* Flag that indicates an MQD GART allocation on GFX9, where the mtype
+ * for the second page onward should be set to NC.
+ */
+#define AMDGPU_GEM_CREATE_MQD_GFX9 (1 << 8)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@@ -520,6 +530,10 @@ union drm_amdgpu_cs {
/* Preempt flag, IB should set Pre_enb bit if PREEMPT flag detected */
#define AMDGPU_IB_FLAG_PREEMPT (1<<2)
+/* The IB fence should do the L2 writeback but not invalidate any shader
+ * caches (L2/vL1/sL1/I$). */
+#define AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (1 << 3)
+
struct drm_amdgpu_cs_chunk_ib {
__u32 _pad;
/** AMDGPU_IB_FLAG_* */
@@ -620,6 +634,12 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_FW_ASD 0x0d
/* Subquery id: Query VCN firmware version */
#define AMDGPU_INFO_FW_VCN 0x0e
+ /* Subquery id: Query GFX RLC SRLC firmware version */
+ #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL 0x0f
+ /* Subquery id: Query GFX RLC SRLG firmware version */
+ #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM 0x10
+ /* Subquery id: Query GFX RLC SRLS firmware version */
+ #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM 0x11
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
/* the used VRAM size */
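
From userspace, the new RLC save/restore-list subqueries slot into the existing firmware-version query; a sketch of the ioctl plumbing, assuming the usual drm_amdgpu_info layout rather than anything added by this patch:

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static void query_srlc_fw(int fd)
{
	struct drm_amdgpu_info_firmware fw = {0};
	struct drm_amdgpu_info request = {0};

	request.return_pointer = (uintptr_t)&fw;
	request.return_size = sizeof(fw);
	request.query = AMDGPU_INFO_FW_VERSION;
	request.query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request) == 0)
		printf("RLC SRLC fw: ver %u, feature %u\n", fw.ver, fw.feature);
}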